1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FP
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FCP
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512-FCP
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX512DQ
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-FCP
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512BW
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW-FCP
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512DQ-BW
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-BW-FCP
16 ; These patterns are produced by the LoopVectorizer for interleaved loads.
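;
; As a rough illustration (hypothetical C source, not part of this test), the
; stride-7 de-interleaving checked below corresponds to a scalar loop of this
; shape, which the LoopVectorizer turns into one wide load followed by seven
; shufflevector extractions (names in, out0..out6, and n are placeholders):
;
;   /* hypothetical example: split one packed stream into 7 output streams */
;   for (int i = 0; i < n; ++i) {
;     out0[i] = in[7*i + 0];
;     out1[i] = in[7*i + 1];
;     out2[i] = in[7*i + 2];
;     out3[i] = in[7*i + 3];
;     out4[i] = in[7*i + 4];
;     out5[i] = in[7*i + 5];
;     out6[i] = in[7*i + 6];
;   }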
18 define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6) nounwind {
19 ; SSE-LABEL: load_i32_stride7_vf2:
20 ; SSE: # %bb.0:
21 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
22 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10
23 ; SSE-NEXT: movdqa (%rdi), %xmm0
24 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
25 ; SSE-NEXT: movdqa 32(%rdi), %xmm2
26 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
27 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
28 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
29 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,2,3,3]
30 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
31 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
32 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
33 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
34 ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
35 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
36 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
37 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,2,3]
38 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
39 ; SSE-NEXT: movdqa 48(%rdi), %xmm2
40 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
41 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
42 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
43 ; SSE-NEXT: movq %xmm0, (%rsi)
44 ; SSE-NEXT: movq %xmm4, (%rdx)
45 ; SSE-NEXT: movq %xmm5, (%rcx)
46 ; SSE-NEXT: movq %xmm6, (%r8)
47 ; SSE-NEXT: movq %xmm1, (%r9)
48 ; SSE-NEXT: movq %xmm3, (%r10)
49 ; SSE-NEXT: movq %xmm7, (%rax)
50 ; SSE-NEXT: retq
51 ;
52 ; AVX-LABEL: load_i32_stride7_vf2:
53 ; AVX: # %bb.0:
54 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
55 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %r10
56 ; AVX-NEXT: vmovaps (%rdi), %ymm0
57 ; AVX-NEXT: vmovaps 32(%rdi), %ymm1
58 ; AVX-NEXT: vmovaps (%rdi), %xmm2
59 ; AVX-NEXT: vmovaps 16(%rdi), %xmm3
60 ; AVX-NEXT: vmovaps 32(%rdi), %xmm4
61 ; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm3[2,3,2,3]
62 ; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm2[0],xmm5[1],xmm2[2,3]
63 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm4[0],xmm2[1],xmm4[2,3]
64 ; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[1,0,2,3]
65 ; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm2[2,3,2,3]
66 ; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0],xmm4[1],xmm7[2,3]
67 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[3]
68 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,2,2,3]
69 ; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,3,2,3]
70 ; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3]
71 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm1[0,0],ymm0[1,0],ymm1[4,4],ymm0[5,4]
72 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
73 ; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
74 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,0],ymm0[2,0],ymm1[5,4],ymm0[6,4]
75 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
76 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
77 ; AVX-NEXT: vmovlps %xmm5, (%rsi)
78 ; AVX-NEXT: vmovlps %xmm6, (%rdx)
79 ; AVX-NEXT: vmovlps %xmm7, (%rcx)
80 ; AVX-NEXT: vmovlps %xmm2, (%r8)
81 ; AVX-NEXT: vmovlps %xmm3, (%r9)
82 ; AVX-NEXT: vmovlps %xmm4, (%r10)
83 ; AVX-NEXT: vmovlps %xmm0, (%rax)
84 ; AVX-NEXT: vzeroupper
85 ; AVX-NEXT: retq
86 ;
87 ; AVX2-LABEL: load_i32_stride7_vf2:
88 ; AVX2: # %bb.0:
89 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
90 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
91 ; AVX2-NEXT: vmovaps (%rdi), %ymm0
92 ; AVX2-NEXT: vmovaps 32(%rdi), %ymm1
93 ; AVX2-NEXT: vbroadcastss 28(%rdi), %xmm2
94 ; AVX2-NEXT: vmovaps (%rdi), %xmm3
95 ; AVX2-NEXT: vmovaps 32(%rdi), %xmm4
96 ; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
97 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm4[0],xmm3[1],xmm4[2,3]
98 ; AVX2-NEXT: vshufps {{.*#+}} xmm5 = xmm5[1,0,2,3]
99 ; AVX2-NEXT: vbroadcastss 8(%rdi), %xmm6
100 ; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm4[1],xmm6[2,3]
101 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3]
102 ; AVX2-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,2,2,3]
103 ; AVX2-NEXT: vmovsd {{.*#+}} xmm4 = [4,3,0,0]
104 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm0[4,5,6,7]
105 ; AVX2-NEXT: vpermps %ymm7, %ymm4, %ymm4
106 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
107 ; AVX2-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
108 ; AVX2-NEXT: vextractf128 $1, %ymm7, %xmm7
109 ; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
110 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
111 ; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
112 ; AVX2-NEXT: vmovlps %xmm2, (%rsi)
113 ; AVX2-NEXT: vmovlps %xmm5, (%rdx)
114 ; AVX2-NEXT: vmovlps %xmm6, (%rcx)
115 ; AVX2-NEXT: vmovlps %xmm3, (%r8)
116 ; AVX2-NEXT: vmovlps %xmm4, (%r9)
117 ; AVX2-NEXT: vmovlps %xmm7, (%r10)
118 ; AVX2-NEXT: vmovlps %xmm0, (%rax)
119 ; AVX2-NEXT: vzeroupper
120 ; AVX2-NEXT: retq
121 ;
122 ; AVX2-FP-LABEL: load_i32_stride7_vf2:
123 ; AVX2-FP: # %bb.0:
124 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
125 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %r10
126 ; AVX2-FP-NEXT: vmovaps (%rdi), %ymm0
127 ; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm1
128 ; AVX2-FP-NEXT: vbroadcastss 28(%rdi), %xmm2
129 ; AVX2-FP-NEXT: vmovaps (%rdi), %xmm3
130 ; AVX2-FP-NEXT: vmovaps 32(%rdi), %xmm4
131 ; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
132 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm4[0],xmm3[1],xmm4[2,3]
133 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm5 = xmm5[1,0,2,3]
134 ; AVX2-FP-NEXT: vbroadcastss 8(%rdi), %xmm6
135 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm4[1],xmm6[2,3]
136 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3]
137 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,2,2,3]
138 ; AVX2-FP-NEXT: vmovsd {{.*#+}} xmm4 = [4,3,0,0]
139 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm0[4,5,6,7]
140 ; AVX2-FP-NEXT: vpermps %ymm7, %ymm4, %ymm4
141 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
142 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
143 ; AVX2-FP-NEXT: vextractf128 $1, %ymm7, %xmm7
144 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
145 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
146 ; AVX2-FP-NEXT: vextractf128 $1, %ymm0, %xmm0
147 ; AVX2-FP-NEXT: vmovlps %xmm2, (%rsi)
148 ; AVX2-FP-NEXT: vmovlps %xmm5, (%rdx)
149 ; AVX2-FP-NEXT: vmovlps %xmm6, (%rcx)
150 ; AVX2-FP-NEXT: vmovlps %xmm3, (%r8)
151 ; AVX2-FP-NEXT: vmovlps %xmm4, (%r9)
152 ; AVX2-FP-NEXT: vmovlps %xmm7, (%r10)
153 ; AVX2-FP-NEXT: vmovlps %xmm0, (%rax)
154 ; AVX2-FP-NEXT: vzeroupper
155 ; AVX2-FP-NEXT: retq
156 ;
157 ; AVX2-FCP-LABEL: load_i32_stride7_vf2:
158 ; AVX2-FCP: # %bb.0:
159 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
160 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
161 ; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm0
162 ; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm1
163 ; AVX2-FCP-NEXT: vbroadcastss 28(%rdi), %xmm2
164 ; AVX2-FCP-NEXT: vmovaps (%rdi), %xmm3
165 ; AVX2-FCP-NEXT: vmovaps 32(%rdi), %xmm4
166 ; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
167 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm5 = xmm4[0],xmm3[1],xmm4[2,3]
168 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm5 = xmm5[1,0,2,3]
169 ; AVX2-FCP-NEXT: vbroadcastss 8(%rdi), %xmm6
170 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm4[1],xmm6[2,3]
171 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3]
172 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,2,2,3]
173 ; AVX2-FCP-NEXT: vmovsd {{.*#+}} xmm4 = [4,3,0,0]
174 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm0[4,5,6,7]
175 ; AVX2-FCP-NEXT: vpermps %ymm7, %ymm4, %ymm4
176 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
177 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
178 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm7, %xmm7
179 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
180 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
181 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm0, %xmm0
182 ; AVX2-FCP-NEXT: vmovlps %xmm2, (%rsi)
183 ; AVX2-FCP-NEXT: vmovlps %xmm5, (%rdx)
184 ; AVX2-FCP-NEXT: vmovlps %xmm6, (%rcx)
185 ; AVX2-FCP-NEXT: vmovlps %xmm3, (%r8)
186 ; AVX2-FCP-NEXT: vmovlps %xmm4, (%r9)
187 ; AVX2-FCP-NEXT: vmovlps %xmm7, (%r10)
188 ; AVX2-FCP-NEXT: vmovlps %xmm0, (%rax)
189 ; AVX2-FCP-NEXT: vzeroupper
190 ; AVX2-FCP-NEXT: retq
191 ;
192 ; AVX512-LABEL: load_i32_stride7_vf2:
193 ; AVX512: # %bb.0:
194 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
195 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
196 ; AVX512-NEXT: vmovdqa (%rdi), %xmm0
197 ; AVX512-NEXT: vmovdqa 32(%rdi), %xmm1
198 ; AVX512-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
199 ; AVX512-NEXT: vmovd %xmm1, %r11d
200 ; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
201 ; AVX512-NEXT: vpinsrd $1, %r11d, %xmm3, %xmm3
202 ; AVX512-NEXT: vpbroadcastd 8(%rdi), %xmm4
203 ; AVX512-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
204 ; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
205 ; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
206 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
207 ; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
208 ; AVX512-NEXT: vmovdqa (%rdi), %ymm6
209 ; AVX512-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
210 ; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
211 ; AVX512-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
212 ; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm7
213 ; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
214 ; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
215 ; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm5
216 ; AVX512-NEXT: vmovq %xmm2, (%rsi)
217 ; AVX512-NEXT: vmovq %xmm3, (%rdx)
218 ; AVX512-NEXT: vmovq %xmm4, (%rcx)
219 ; AVX512-NEXT: vmovq %xmm0, (%r8)
220 ; AVX512-NEXT: vmovq %xmm1, (%r9)
221 ; AVX512-NEXT: vmovq %xmm7, (%r10)
222 ; AVX512-NEXT: vmovq %xmm5, (%rax)
223 ; AVX512-NEXT: vzeroupper
224 ; AVX512-NEXT: retq
225 ;
226 ; AVX512-FCP-LABEL: load_i32_stride7_vf2:
227 ; AVX512-FCP: # %bb.0:
228 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
229 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
230 ; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
231 ; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
232 ; AVX512-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
233 ; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,4,1,4]
234 ; AVX512-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
235 ; AVX512-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm4
236 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
237 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [7,2,0,0]
238 ; AVX512-FCP-NEXT: vpermi2d %xmm0, %xmm1, %xmm5
239 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [4,11,0,0]
240 ; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
241 ; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm6
242 ; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
243 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
244 ; AVX512-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
245 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
246 ; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
247 ; AVX512-FCP-NEXT: vmovq %xmm2, (%rsi)
248 ; AVX512-FCP-NEXT: vmovq %xmm3, (%rdx)
249 ; AVX512-FCP-NEXT: vmovq %xmm4, (%rcx)
250 ; AVX512-FCP-NEXT: vmovq %xmm5, (%r8)
251 ; AVX512-FCP-NEXT: vmovq %xmm0, (%r9)
252 ; AVX512-FCP-NEXT: vmovq %xmm7, (%r10)
253 ; AVX512-FCP-NEXT: vmovq %xmm8, (%rax)
254 ; AVX512-FCP-NEXT: vzeroupper
255 ; AVX512-FCP-NEXT: retq
256 ;
257 ; AVX512DQ-LABEL: load_i32_stride7_vf2:
258 ; AVX512DQ: # %bb.0:
259 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
260 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
261 ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
262 ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm1
263 ; AVX512DQ-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
264 ; AVX512DQ-NEXT: vmovd %xmm1, %r11d
265 ; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
266 ; AVX512DQ-NEXT: vpinsrd $1, %r11d, %xmm3, %xmm3
267 ; AVX512DQ-NEXT: vpbroadcastd 8(%rdi), %xmm4
268 ; AVX512DQ-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
269 ; AVX512DQ-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
270 ; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
271 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
272 ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
273 ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm6
274 ; AVX512DQ-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
275 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
276 ; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
277 ; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm7
278 ; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
279 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
280 ; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm5
281 ; AVX512DQ-NEXT: vmovq %xmm2, (%rsi)
282 ; AVX512DQ-NEXT: vmovq %xmm3, (%rdx)
283 ; AVX512DQ-NEXT: vmovq %xmm4, (%rcx)
284 ; AVX512DQ-NEXT: vmovq %xmm0, (%r8)
285 ; AVX512DQ-NEXT: vmovq %xmm1, (%r9)
286 ; AVX512DQ-NEXT: vmovq %xmm7, (%r10)
287 ; AVX512DQ-NEXT: vmovq %xmm5, (%rax)
288 ; AVX512DQ-NEXT: vzeroupper
289 ; AVX512DQ-NEXT: retq
290 ;
291 ; AVX512DQ-FCP-LABEL: load_i32_stride7_vf2:
292 ; AVX512DQ-FCP: # %bb.0:
293 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
294 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
295 ; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
296 ; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
297 ; AVX512DQ-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
298 ; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,4,1,4]
299 ; AVX512DQ-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
300 ; AVX512DQ-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm4
301 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
302 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [7,2,0,0]
303 ; AVX512DQ-FCP-NEXT: vpermi2d %xmm0, %xmm1, %xmm5
304 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [4,11,0,0]
305 ; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
306 ; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm6
307 ; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
308 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
309 ; AVX512DQ-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
310 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
311 ; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
312 ; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%rsi)
313 ; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%rdx)
314 ; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%rcx)
315 ; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%r8)
316 ; AVX512DQ-FCP-NEXT: vmovq %xmm0, (%r9)
317 ; AVX512DQ-FCP-NEXT: vmovq %xmm7, (%r10)
318 ; AVX512DQ-FCP-NEXT: vmovq %xmm8, (%rax)
319 ; AVX512DQ-FCP-NEXT: vzeroupper
320 ; AVX512DQ-FCP-NEXT: retq
321 ;
322 ; AVX512BW-LABEL: load_i32_stride7_vf2:
323 ; AVX512BW: # %bb.0:
324 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
325 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
326 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
327 ; AVX512BW-NEXT: vmovdqa 32(%rdi), %xmm1
328 ; AVX512BW-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
329 ; AVX512BW-NEXT: vmovd %xmm1, %r11d
330 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
331 ; AVX512BW-NEXT: vpinsrd $1, %r11d, %xmm3, %xmm3
332 ; AVX512BW-NEXT: vpbroadcastd 8(%rdi), %xmm4
333 ; AVX512BW-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
334 ; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
335 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
336 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
337 ; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm5
338 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm6
339 ; AVX512BW-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
340 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
341 ; AVX512BW-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
342 ; AVX512BW-NEXT: vextracti128 $1, %ymm7, %xmm7
343 ; AVX512BW-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
344 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
345 ; AVX512BW-NEXT: vextracti128 $1, %ymm5, %xmm5
346 ; AVX512BW-NEXT: vmovq %xmm2, (%rsi)
347 ; AVX512BW-NEXT: vmovq %xmm3, (%rdx)
348 ; AVX512BW-NEXT: vmovq %xmm4, (%rcx)
349 ; AVX512BW-NEXT: vmovq %xmm0, (%r8)
350 ; AVX512BW-NEXT: vmovq %xmm1, (%r9)
351 ; AVX512BW-NEXT: vmovq %xmm7, (%r10)
352 ; AVX512BW-NEXT: vmovq %xmm5, (%rax)
353 ; AVX512BW-NEXT: vzeroupper
354 ; AVX512BW-NEXT: retq
355 ;
356 ; AVX512BW-FCP-LABEL: load_i32_stride7_vf2:
357 ; AVX512BW-FCP: # %bb.0:
358 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
359 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
360 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
361 ; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
362 ; AVX512BW-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
363 ; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,4,1,4]
364 ; AVX512BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
365 ; AVX512BW-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm4
366 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
367 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [7,2,0,0]
368 ; AVX512BW-FCP-NEXT: vpermi2d %xmm0, %xmm1, %xmm5
369 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [4,11,0,0]
370 ; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
371 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
372 ; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
373 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
374 ; AVX512BW-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
375 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
376 ; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
377 ; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rsi)
378 ; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rdx)
379 ; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rcx)
380 ; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r8)
381 ; AVX512BW-FCP-NEXT: vmovq %xmm0, (%r9)
382 ; AVX512BW-FCP-NEXT: vmovq %xmm7, (%r10)
383 ; AVX512BW-FCP-NEXT: vmovq %xmm8, (%rax)
384 ; AVX512BW-FCP-NEXT: vzeroupper
385 ; AVX512BW-FCP-NEXT: retq
386 ;
387 ; AVX512DQ-BW-LABEL: load_i32_stride7_vf2:
388 ; AVX512DQ-BW: # %bb.0:
389 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
390 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
391 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm0
392 ; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %xmm1
393 ; AVX512DQ-BW-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
394 ; AVX512DQ-BW-NEXT: vmovd %xmm1, %r11d
395 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
396 ; AVX512DQ-BW-NEXT: vpinsrd $1, %r11d, %xmm3, %xmm3
397 ; AVX512DQ-BW-NEXT: vpbroadcastd 8(%rdi), %xmm4
398 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
399 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
400 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
401 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,11,0,0]
402 ; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm5
403 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm6
404 ; AVX512DQ-BW-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
405 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
406 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
407 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm7, %xmm7
408 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
409 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
410 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm5, %xmm5
411 ; AVX512DQ-BW-NEXT: vmovq %xmm2, (%rsi)
412 ; AVX512DQ-BW-NEXT: vmovq %xmm3, (%rdx)
413 ; AVX512DQ-BW-NEXT: vmovq %xmm4, (%rcx)
414 ; AVX512DQ-BW-NEXT: vmovq %xmm0, (%r8)
415 ; AVX512DQ-BW-NEXT: vmovq %xmm1, (%r9)
416 ; AVX512DQ-BW-NEXT: vmovq %xmm7, (%r10)
417 ; AVX512DQ-BW-NEXT: vmovq %xmm5, (%rax)
418 ; AVX512DQ-BW-NEXT: vzeroupper
419 ; AVX512DQ-BW-NEXT: retq
420 ;
421 ; AVX512DQ-BW-FCP-LABEL: load_i32_stride7_vf2:
422 ; AVX512DQ-BW-FCP: # %bb.0:
423 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
424 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
425 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
426 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm1
427 ; AVX512DQ-BW-FCP-NEXT: vpinsrd $1, 28(%rdi), %xmm0, %xmm2
428 ; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,4,1,4]
429 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
430 ; AVX512DQ-BW-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm4
431 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
432 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [7,2,0,0]
433 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm0, %xmm1, %xmm5
434 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [4,11,0,0]
435 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
436 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
437 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
438 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
439 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
440 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
441 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
442 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rsi)
443 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rdx)
444 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rcx)
445 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r8)
446 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%r9)
447 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm7, (%r10)
448 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm8, (%rax)
449 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
450 ; AVX512DQ-BW-FCP-NEXT: retq
451 %wide.vec = load <14 x i32>, ptr %in.vec, align 64
452 %strided.vec0 = shufflevector <14 x i32> %wide.vec, <14 x i32> poison, <2 x i32> <i32 0, i32 7>
453 %strided.vec1 = shufflevector <14 x i32> %wide.vec, <14 x i32> poison, <2 x i32> <i32 1, i32 8>
454 %strided.vec2 = shufflevector <14 x i32> %wide.vec, <14 x i32> poison, <2 x i32> <i32 2, i32 9>
455 %strided.vec3 = shufflevector <14 x i32> %wide.vec, <14 x i32> poison, <2 x i32> <i32 3, i32 10>
456 %strided.vec4 = shufflevector <14 x i32> %wide.vec, <14 x i32> poison, <2 x i32> <i32 4, i32 11>
457 %strided.vec5 = shufflevector <14 x i32> %wide.vec, <14 x i32> poison, <2 x i32> <i32 5, i32 12>
458 %strided.vec6 = shufflevector <14 x i32> %wide.vec, <14 x i32> poison, <2 x i32> <i32 6, i32 13>
459 store <2 x i32> %strided.vec0, ptr %out.vec0, align 64
460 store <2 x i32> %strided.vec1, ptr %out.vec1, align 64
461 store <2 x i32> %strided.vec2, ptr %out.vec2, align 64
462 store <2 x i32> %strided.vec3, ptr %out.vec3, align 64
463 store <2 x i32> %strided.vec4, ptr %out.vec4, align 64
464 store <2 x i32> %strided.vec5, ptr %out.vec5, align 64
465 store <2 x i32> %strided.vec6, ptr %out.vec6, align 64
466 ret void
467 }
469 define void @load_i32_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6) nounwind {
470 ; SSE-LABEL: load_i32_stride7_vf4:
471 ; SSE: # %bb.0:
472 ; SSE-NEXT: movdqa 96(%rdi), %xmm1
473 ; SSE-NEXT: movdqa 64(%rdi), %xmm0
474 ; SSE-NEXT: movdqa 80(%rdi), %xmm2
475 ; SSE-NEXT: movdqa (%rdi), %xmm11
476 ; SSE-NEXT: movdqa 16(%rdi), %xmm3
477 ; SSE-NEXT: movdqa 32(%rdi), %xmm4
478 ; SSE-NEXT: movdqa 48(%rdi), %xmm6
479 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm3[3,3,3,3]
480 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm11[1,1,1,1]
481 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm11[2,3,2,3]
482 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm11[2,2,3,3]
483 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
484 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,2,3,3]
485 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
486 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm11[0],xmm5[1]
487 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
488 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm2[2,2,2,2]
489 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm3[1,1,1,1]
490 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1]
491 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm6[1,1,1,1]
492 ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm11[2],xmm6[3],xmm11[3]
493 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm7[0],xmm6[1]
494 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,1,1]
495 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
496 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,2,3]
497 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm0[0,0,1,1]
498 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm7[0],xmm11[1],xmm7[1]
499 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm8[0],xmm11[1]
500 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,0,1,1]
501 ; SSE-NEXT: movdqa %xmm0, %xmm8
502 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
503 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
504 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdi
505 ; SSE-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
506 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm9[0],xmm8[1]
507 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[3,3,3,3]
508 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[2,3,2,3]
509 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
510 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,2,3,3]
511 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
512 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
513 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,2,2,2]
514 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
515 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm10[0],xmm0[1]
516 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
517 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
518 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
519 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
520 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
521 ; SSE-NEXT: movapd %xmm5, (%rsi)
522 ; SSE-NEXT: movapd %xmm6, (%rdx)
523 ; SSE-NEXT: movapd %xmm11, (%rcx)
524 ; SSE-NEXT: movapd %xmm8, (%r8)
525 ; SSE-NEXT: movapd %xmm4, (%r9)
526 ; SSE-NEXT: movapd %xmm0, (%rdi)
527 ; SSE-NEXT: movapd %xmm2, (%rax)
528 ; SSE-NEXT: retq
529 ;
530 ; AVX-LABEL: load_i32_stride7_vf4:
531 ; AVX: # %bb.0:
532 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
533 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %r10
534 ; AVX-NEXT: vmovaps 32(%rdi), %ymm0
535 ; AVX-NEXT: vmovaps (%rdi), %ymm1
536 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
537 ; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
538 ; AVX-NEXT: vmovaps (%rdi), %xmm3
539 ; AVX-NEXT: vmovaps 32(%rdi), %xmm4
540 ; AVX-NEXT: vmovaps 64(%rdi), %xmm5
541 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
542 ; AVX-NEXT: vmovaps 80(%rdi), %xmm6
543 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2,3]
544 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,3,2,1]
545 ; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm4[0],xmm3[1],xmm4[2,3]
546 ; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,0],mem[3,3]
547 ; AVX-NEXT: vinsertps {{.*#+}} xmm7 = xmm7[0,1,2],xmm6[2]
548 ; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm5[0,1,0,1]
549 ; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1,2],xmm6[3]
550 ; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm3[2,3,2,3]
551 ; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0],xmm4[1],xmm9[2,3]
552 ; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm9[0,1],xmm8[2,3]
553 ; AVX-NEXT: vmovaps 96(%rdi), %xmm9
554 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm9[0],xmm5[1],xmm9[2,3]
555 ; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3]
556 ; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm10[0,1],xmm3[2,3]
557 ; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,2,1,0]
558 ; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm9[0,1,0,1]
559 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm5[0,1,2],xmm10[3]
560 ; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,3,2,3]
561 ; AVX-NEXT: vblendps {{.*#+}} xmm4 = mem[0],xmm4[1],mem[2,3]
562 ; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm10[2,3]
563 ; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm9[0,1,2],xmm5[3]
564 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[0,0],ymm1[1,0],ymm0[4,4],ymm1[5,4]
565 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
566 ; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
567 ; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm10[0,1],xmm5[3,2]
568 ; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,0,1]
569 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm9[3]
570 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm1[2,0],ymm0[5,4],ymm1[6,4]
571 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
572 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
573 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3]
574 ; AVX-NEXT: vmovaps %xmm2, (%rsi)
575 ; AVX-NEXT: vmovaps %xmm7, (%rdx)
576 ; AVX-NEXT: vmovaps %xmm8, (%rcx)
577 ; AVX-NEXT: vmovaps %xmm3, (%r8)
578 ; AVX-NEXT: vmovaps %xmm4, (%r9)
579 ; AVX-NEXT: vmovaps %xmm5, (%r10)
580 ; AVX-NEXT: vmovaps %xmm0, (%rax)
581 ; AVX-NEXT: vzeroupper
582 ; AVX-NEXT: retq
583 ;
584 ; AVX2-LABEL: load_i32_stride7_vf4:
585 ; AVX2: # %bb.0:
586 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
587 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
588 ; AVX2-NEXT: vmovaps (%rdi), %ymm0
589 ; AVX2-NEXT: vmovaps 32(%rdi), %ymm1
590 ; AVX2-NEXT: vmovaps {{.*#+}} xmm2 = [0,7,6,u]
591 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
592 ; AVX2-NEXT: vpermps %ymm3, %ymm2, %ymm2
593 ; AVX2-NEXT: vbroadcastss 84(%rdi), %xmm3
594 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3]
595 ; AVX2-NEXT: vmovaps 80(%rdi), %xmm4
596 ; AVX2-NEXT: vshufps {{.*#+}} xmm3 = xmm4[2,2,2,2]
597 ; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
598 ; AVX2-NEXT: vshufps {{.*#+}} ymm6 = ymm5[1,0,3,3,5,4,7,7]
599 ; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,3,2,3]
600 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3]
601 ; AVX2-NEXT: vbroadcastss 8(%rdi), %xmm6
602 ; AVX2-NEXT: vmovaps 32(%rdi), %xmm7
603 ; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3]
604 ; AVX2-NEXT: vmovaps 64(%rdi), %xmm8
605 ; AVX2-NEXT: vbroadcastss %xmm8, %xmm9
606 ; AVX2-NEXT: vunpckhps {{.*#+}} xmm4 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
607 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
608 ; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1,2],mem[3]
609 ; AVX2-NEXT: vmovaps 96(%rdi), %xmm7
610 ; AVX2-NEXT: vblendps {{.*#+}} xmm9 = xmm7[0],xmm8[1],xmm7[2,3]
611 ; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1],xmm6[2,3]
612 ; AVX2-NEXT: vshufps {{.*#+}} xmm6 = xmm6[3,2,1,0]
613 ; AVX2-NEXT: vbroadcastss 100(%rdi), %xmm9
614 ; AVX2-NEXT: vblendps {{.*#+}} xmm9 = xmm8[0,1,2],xmm9[3]
615 ; AVX2-NEXT: vmovsd {{.*#+}} xmm10 = [4,3,0,0]
616 ; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm0[4,5,6,7]
617 ; AVX2-NEXT: vpermps %ymm11, %ymm10, %ymm10
618 ; AVX2-NEXT: vblendps {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
619 ; AVX2-NEXT: vblendps {{.*#+}} xmm8 = xmm7[0,1,2],xmm8[3]
620 ; AVX2-NEXT: vshufps {{.*#+}} xmm8 = xmm8[0,1,3,2]
621 ; AVX2-NEXT: vshufps {{.*#+}} ymm5 = ymm5[1,0,2,3,5,4,6,7]
622 ; AVX2-NEXT: vextractf128 $1, %ymm5, %xmm5
623 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm8[2,3]
624 ; AVX2-NEXT: vbroadcastss 80(%rdi), %ymm8
625 ; AVX2-NEXT: vblendps {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3]
626 ; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
627 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
628 ; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
629 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
630 ; AVX2-NEXT: vmovaps %xmm2, (%rsi)
631 ; AVX2-NEXT: vmovaps %xmm3, (%rdx)
632 ; AVX2-NEXT: vmovaps %xmm4, (%rcx)
633 ; AVX2-NEXT: vmovaps %xmm6, (%r8)
634 ; AVX2-NEXT: vmovaps %xmm9, (%r9)
635 ; AVX2-NEXT: vmovaps %xmm5, (%r10)
636 ; AVX2-NEXT: vmovaps %xmm0, (%rax)
637 ; AVX2-NEXT: vzeroupper
638 ; AVX2-NEXT: retq
639 ;
640 ; AVX2-FP-LABEL: load_i32_stride7_vf4:
641 ; AVX2-FP: # %bb.0:
642 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
643 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %r10
644 ; AVX2-FP-NEXT: vmovaps (%rdi), %ymm0
645 ; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm1
646 ; AVX2-FP-NEXT: vmovaps {{.*#+}} xmm2 = [0,7,6,u]
647 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
648 ; AVX2-FP-NEXT: vpermps %ymm3, %ymm2, %ymm2
649 ; AVX2-FP-NEXT: vbroadcastss 84(%rdi), %xmm3
650 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3]
651 ; AVX2-FP-NEXT: vmovaps 80(%rdi), %xmm4
652 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm3 = xmm4[2,2,2,2]
653 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
654 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm6 = ymm5[1,0,3,3,5,4,7,7]
655 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,3,2,3]
656 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3]
657 ; AVX2-FP-NEXT: vbroadcastss 8(%rdi), %xmm6
658 ; AVX2-FP-NEXT: vmovaps 32(%rdi), %xmm7
659 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3]
660 ; AVX2-FP-NEXT: vmovaps 64(%rdi), %xmm8
661 ; AVX2-FP-NEXT: vbroadcastss %xmm8, %xmm9
662 ; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm4 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
663 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
664 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1,2],mem[3]
665 ; AVX2-FP-NEXT: vmovaps 96(%rdi), %xmm7
666 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm9 = xmm7[0],xmm8[1],xmm7[2,3]
667 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1],xmm6[2,3]
668 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm6 = xmm6[3,2,1,0]
669 ; AVX2-FP-NEXT: vbroadcastss 100(%rdi), %xmm9
670 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm9 = xmm8[0,1,2],xmm9[3]
671 ; AVX2-FP-NEXT: vmovsd {{.*#+}} xmm10 = [4,3,0,0]
672 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm0[4,5,6,7]
673 ; AVX2-FP-NEXT: vpermps %ymm11, %ymm10, %ymm10
674 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
675 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm8 = xmm7[0,1,2],xmm8[3]
676 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm8 = xmm8[0,1,3,2]
677 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm5 = ymm5[1,0,2,3,5,4,6,7]
678 ; AVX2-FP-NEXT: vextractf128 $1, %ymm5, %xmm5
679 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm8[2,3]
680 ; AVX2-FP-NEXT: vbroadcastss 80(%rdi), %ymm8
681 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3]
682 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
683 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
684 ; AVX2-FP-NEXT: vextractf128 $1, %ymm0, %xmm0
685 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
686 ; AVX2-FP-NEXT: vmovaps %xmm2, (%rsi)
687 ; AVX2-FP-NEXT: vmovaps %xmm3, (%rdx)
688 ; AVX2-FP-NEXT: vmovaps %xmm4, (%rcx)
689 ; AVX2-FP-NEXT: vmovaps %xmm6, (%r8)
690 ; AVX2-FP-NEXT: vmovaps %xmm9, (%r9)
691 ; AVX2-FP-NEXT: vmovaps %xmm5, (%r10)
692 ; AVX2-FP-NEXT: vmovaps %xmm0, (%rax)
693 ; AVX2-FP-NEXT: vzeroupper
694 ; AVX2-FP-NEXT: retq
695 ;
696 ; AVX2-FCP-LABEL: load_i32_stride7_vf4:
697 ; AVX2-FCP: # %bb.0:
698 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
699 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
700 ; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm0
701 ; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm1
702 ; AVX2-FCP-NEXT: vmovaps {{.*#+}} xmm2 = [0,7,6,u]
703 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
704 ; AVX2-FCP-NEXT: vpermps %ymm3, %ymm2, %ymm2
705 ; AVX2-FCP-NEXT: vbroadcastss 84(%rdi), %xmm3
706 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3]
707 ; AVX2-FCP-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [1,0,7,0,1,0,7,0]
708 ; AVX2-FCP-NEXT: # ymm3 = mem[0,1,0,1]
709 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
710 ; AVX2-FCP-NEXT: vpermps %ymm4, %ymm3, %ymm3
711 ; AVX2-FCP-NEXT: vmovaps 80(%rdi), %xmm5
712 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm6 = xmm5[2,2,2,2]
713 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1,2],xmm6[3]
714 ; AVX2-FCP-NEXT: vbroadcastss 8(%rdi), %xmm6
715 ; AVX2-FCP-NEXT: vmovaps 32(%rdi), %xmm7
716 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3]
717 ; AVX2-FCP-NEXT: vmovaps 64(%rdi), %xmm8
718 ; AVX2-FCP-NEXT: vbroadcastss %xmm8, %xmm9
719 ; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm5 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
720 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
721 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1,2],mem[3]
722 ; AVX2-FCP-NEXT: vmovaps 96(%rdi), %xmm7
723 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm9 = xmm7[0],xmm8[1],xmm7[2,3]
724 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1],xmm6[2,3]
725 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm6 = xmm6[3,2,1,0]
726 ; AVX2-FCP-NEXT: vbroadcastss 100(%rdi), %xmm9
727 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm9 = xmm8[0,1,2],xmm9[3]
728 ; AVX2-FCP-NEXT: vmovsd {{.*#+}} xmm10 = [4,3,0,0]
729 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm0[4,5,6,7]
730 ; AVX2-FCP-NEXT: vpermps %ymm11, %ymm10, %ymm10
731 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
732 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm8 = xmm7[0,1,2],xmm8[3]
733 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm8 = xmm8[0,1,3,2]
734 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[1,0,2,3,5,4,6,7]
735 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm4, %xmm4
736 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,3]
737 ; AVX2-FCP-NEXT: vbroadcastss 80(%rdi), %ymm8
738 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3]
739 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
740 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
741 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm0, %xmm0
742 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
743 ; AVX2-FCP-NEXT: vmovaps %xmm2, (%rsi)
744 ; AVX2-FCP-NEXT: vmovaps %xmm3, (%rdx)
745 ; AVX2-FCP-NEXT: vmovaps %xmm5, (%rcx)
746 ; AVX2-FCP-NEXT: vmovaps %xmm6, (%r8)
747 ; AVX2-FCP-NEXT: vmovaps %xmm9, (%r9)
748 ; AVX2-FCP-NEXT: vmovaps %xmm4, (%r10)
749 ; AVX2-FCP-NEXT: vmovaps %xmm0, (%rax)
750 ; AVX2-FCP-NEXT: vzeroupper
751 ; AVX2-FCP-NEXT: retq
752 ;
753 ; AVX512-LABEL: load_i32_stride7_vf4:
754 ; AVX512: # %bb.0:
755 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
756 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
757 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
758 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
759 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,14,21]
760 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
761 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,8,15,22]
762 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
763 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,9,16,23]
764 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
765 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,10,17,24]
766 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
767 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,11,18,25]
768 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
769 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm7 = [5,12,19,26]
770 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
771 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,20,27]
772 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
773 ; AVX512-NEXT: vmovdqa %xmm2, (%rsi)
774 ; AVX512-NEXT: vmovdqa %xmm3, (%rdx)
775 ; AVX512-NEXT: vmovdqa %xmm4, (%rcx)
776 ; AVX512-NEXT: vmovdqa %xmm5, (%r8)
777 ; AVX512-NEXT: vmovdqa %xmm6, (%r9)
778 ; AVX512-NEXT: vmovdqa %xmm7, (%r10)
779 ; AVX512-NEXT: vmovdqa %xmm8, (%rax)
780 ; AVX512-NEXT: vzeroupper
781 ; AVX512-NEXT: retq
782 ;
783 ; AVX512-FCP-LABEL: load_i32_stride7_vf4:
784 ; AVX512-FCP: # %bb.0:
785 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
786 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
787 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
788 ; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
789 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,14,21]
790 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
791 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,8,15,22]
792 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
793 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,9,16,23]
794 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
795 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,10,17,24]
796 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
797 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,11,18,25]
798 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
799 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [5,12,19,26]
800 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
801 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,20,27]
802 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
803 ; AVX512-FCP-NEXT: vmovdqa %xmm2, (%rsi)
804 ; AVX512-FCP-NEXT: vmovdqa %xmm3, (%rdx)
805 ; AVX512-FCP-NEXT: vmovdqa %xmm4, (%rcx)
806 ; AVX512-FCP-NEXT: vmovdqa %xmm5, (%r8)
807 ; AVX512-FCP-NEXT: vmovdqa %xmm6, (%r9)
808 ; AVX512-FCP-NEXT: vmovdqa %xmm7, (%r10)
809 ; AVX512-FCP-NEXT: vmovdqa %xmm8, (%rax)
810 ; AVX512-FCP-NEXT: vzeroupper
811 ; AVX512-FCP-NEXT: retq
812 ;
813 ; AVX512DQ-LABEL: load_i32_stride7_vf4:
814 ; AVX512DQ: # %bb.0:
815 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
816 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
817 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
818 ; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm1
819 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,14,21]
820 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
821 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,8,15,22]
822 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
823 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,9,16,23]
824 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
825 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,10,17,24]
826 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
827 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,11,18,25]
828 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
829 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm7 = [5,12,19,26]
830 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
831 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,20,27]
832 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
833 ; AVX512DQ-NEXT: vmovdqa %xmm2, (%rsi)
834 ; AVX512DQ-NEXT: vmovdqa %xmm3, (%rdx)
835 ; AVX512DQ-NEXT: vmovdqa %xmm4, (%rcx)
836 ; AVX512DQ-NEXT: vmovdqa %xmm5, (%r8)
837 ; AVX512DQ-NEXT: vmovdqa %xmm6, (%r9)
838 ; AVX512DQ-NEXT: vmovdqa %xmm7, (%r10)
839 ; AVX512DQ-NEXT: vmovdqa %xmm8, (%rax)
840 ; AVX512DQ-NEXT: vzeroupper
841 ; AVX512DQ-NEXT: retq
842 ;
843 ; AVX512DQ-FCP-LABEL: load_i32_stride7_vf4:
844 ; AVX512DQ-FCP: # %bb.0:
845 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
846 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
847 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
848 ; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
849 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,14,21]
850 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
851 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,8,15,22]
852 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
853 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,9,16,23]
854 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
855 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,10,17,24]
856 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
857 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,11,18,25]
858 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
859 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [5,12,19,26]
860 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
861 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,20,27]
862 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
863 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, (%rsi)
864 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm3, (%rdx)
865 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, (%rcx)
866 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm5, (%r8)
867 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, (%r9)
868 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm7, (%r10)
869 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm8, (%rax)
870 ; AVX512DQ-FCP-NEXT: vzeroupper
871 ; AVX512DQ-FCP-NEXT: retq
872 ;
873 ; AVX512BW-LABEL: load_i32_stride7_vf4:
874 ; AVX512BW: # %bb.0:
875 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
876 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
877 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
878 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
879 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,14,21]
880 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
881 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,8,15,22]
882 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
883 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,9,16,23]
884 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
885 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,10,17,24]
886 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
887 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,11,18,25]
888 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
889 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm7 = [5,12,19,26]
890 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
891 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,20,27]
892 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
893 ; AVX512BW-NEXT: vmovdqa %xmm2, (%rsi)
894 ; AVX512BW-NEXT: vmovdqa %xmm3, (%rdx)
895 ; AVX512BW-NEXT: vmovdqa %xmm4, (%rcx)
896 ; AVX512BW-NEXT: vmovdqa %xmm5, (%r8)
897 ; AVX512BW-NEXT: vmovdqa %xmm6, (%r9)
898 ; AVX512BW-NEXT: vmovdqa %xmm7, (%r10)
899 ; AVX512BW-NEXT: vmovdqa %xmm8, (%rax)
900 ; AVX512BW-NEXT: vzeroupper
901 ; AVX512BW-NEXT: retq
902 ;
903 ; AVX512BW-FCP-LABEL: load_i32_stride7_vf4:
904 ; AVX512BW-FCP: # %bb.0:
905 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
906 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
907 ; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
908 ; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
909 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,14,21]
910 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
911 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,8,15,22]
912 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
913 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,9,16,23]
914 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
915 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,10,17,24]
916 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
917 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,11,18,25]
918 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
919 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [5,12,19,26]
920 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
921 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,20,27]
922 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
923 ; AVX512BW-FCP-NEXT: vmovdqa %xmm2, (%rsi)
924 ; AVX512BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
925 ; AVX512BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
926 ; AVX512BW-FCP-NEXT: vmovdqa %xmm5, (%r8)
927 ; AVX512BW-FCP-NEXT: vmovdqa %xmm6, (%r9)
928 ; AVX512BW-FCP-NEXT: vmovdqa %xmm7, (%r10)
929 ; AVX512BW-FCP-NEXT: vmovdqa %xmm8, (%rax)
930 ; AVX512BW-FCP-NEXT: vzeroupper
931 ; AVX512BW-FCP-NEXT: retq
932 ;
933 ; AVX512DQ-BW-LABEL: load_i32_stride7_vf4:
934 ; AVX512DQ-BW: # %bb.0:
935 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
936 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
937 ; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
938 ; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm1
939 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,14,21]
940 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
941 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,8,15,22]
942 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
943 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,9,16,23]
944 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
945 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,10,17,24]
946 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
947 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,11,18,25]
948 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
949 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm7 = [5,12,19,26]
950 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
951 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,20,27]
952 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
953 ; AVX512DQ-BW-NEXT: vmovdqa %xmm2, (%rsi)
954 ; AVX512DQ-BW-NEXT: vmovdqa %xmm3, (%rdx)
955 ; AVX512DQ-BW-NEXT: vmovdqa %xmm4, (%rcx)
956 ; AVX512DQ-BW-NEXT: vmovdqa %xmm5, (%r8)
957 ; AVX512DQ-BW-NEXT: vmovdqa %xmm6, (%r9)
958 ; AVX512DQ-BW-NEXT: vmovdqa %xmm7, (%r10)
959 ; AVX512DQ-BW-NEXT: vmovdqa %xmm8, (%rax)
960 ; AVX512DQ-BW-NEXT: vzeroupper
961 ; AVX512DQ-BW-NEXT: retq
962 ;
963 ; AVX512DQ-BW-FCP-LABEL: load_i32_stride7_vf4:
964 ; AVX512DQ-BW-FCP: # %bb.0:
965 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
966 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
967 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
968 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
969 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,14,21]
970 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
971 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,8,15,22]
972 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
973 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [2,9,16,23]
974 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
975 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm5 = [3,10,17,24]
976 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
977 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [4,11,18,25]
978 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
979 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [5,12,19,26]
980 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
981 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,20,27]
982 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
983 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm2, (%rsi)
984 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm3, (%rdx)
985 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm4, (%rcx)
986 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm5, (%r8)
987 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm6, (%r9)
988 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm7, (%r10)
989 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm8, (%rax)
990 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
991 ; AVX512DQ-BW-FCP-NEXT: retq
992 %wide.vec = load <28 x i32>, ptr %in.vec, align 64
993 %strided.vec0 = shufflevector <28 x i32> %wide.vec, <28 x i32> poison, <4 x i32> <i32 0, i32 7, i32 14, i32 21>
994 %strided.vec1 = shufflevector <28 x i32> %wide.vec, <28 x i32> poison, <4 x i32> <i32 1, i32 8, i32 15, i32 22>
995 %strided.vec2 = shufflevector <28 x i32> %wide.vec, <28 x i32> poison, <4 x i32> <i32 2, i32 9, i32 16, i32 23>
996 %strided.vec3 = shufflevector <28 x i32> %wide.vec, <28 x i32> poison, <4 x i32> <i32 3, i32 10, i32 17, i32 24>
997 %strided.vec4 = shufflevector <28 x i32> %wide.vec, <28 x i32> poison, <4 x i32> <i32 4, i32 11, i32 18, i32 25>
998 %strided.vec5 = shufflevector <28 x i32> %wide.vec, <28 x i32> poison, <4 x i32> <i32 5, i32 12, i32 19, i32 26>
999 %strided.vec6 = shufflevector <28 x i32> %wide.vec, <28 x i32> poison, <4 x i32> <i32 6, i32 13, i32 20, i32 27>
1000 store <4 x i32> %strided.vec0, ptr %out.vec0, align 64
1001 store <4 x i32> %strided.vec1, ptr %out.vec1, align 64
1002 store <4 x i32> %strided.vec2, ptr %out.vec2, align 64
1003 store <4 x i32> %strided.vec3, ptr %out.vec3, align 64
1004 store <4 x i32> %strided.vec4, ptr %out.vec4, align 64
1005 store <4 x i32> %strided.vec5, ptr %out.vec5, align 64
1006 store <4 x i32> %strided.vec6, ptr %out.vec6, align 64
1007 ret void
1008 }
1010 define void @load_i32_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6) nounwind {
1011 ; SSE-LABEL: load_i32_stride7_vf8:
1012 ; SSE: # %bb.0:
1013 ; SSE-NEXT: subq $24, %rsp
1014 ; SSE-NEXT: movdqa 144(%rdi), %xmm9
1015 ; SSE-NEXT: movdqa 80(%rdi), %xmm5
1016 ; SSE-NEXT: movdqa (%rdi), %xmm12
1017 ; SSE-NEXT: movdqa 16(%rdi), %xmm11
1018 ; SSE-NEXT: movdqa 48(%rdi), %xmm6
1019 ; SSE-NEXT: movdqa 192(%rdi), %xmm8
1020 ; SSE-NEXT: movdqa 160(%rdi), %xmm10
1021 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1022 ; SSE-NEXT: movdqa 112(%rdi), %xmm15
1023 ; SSE-NEXT: movdqa 128(%rdi), %xmm0
1024 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1025 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
1026 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[1,1,1,1]
1027 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[2,3,2,3]
1028 ; SSE-NEXT: movdqa %xmm15, %xmm3
1029 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
1030 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,2,3,3]
1031 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
1032 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
1033 ; SSE-NEXT: movapd %xmm0, (%rsp) # 16-byte Spill
1034 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[3,3,3,3]
1035 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[1,1,1,1]
1036 ; SSE-NEXT: movdqa %xmm12, %xmm4
1037 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
1038 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1039 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,2,3,3]
1040 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
1041 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
1042 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1043 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
1044 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,2,2,2]
1045 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1046 ; SSE-NEXT: movdqa %xmm10, %xmm4
1047 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
1048 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
1049 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1050 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,2,2]
1051 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1052 ; SSE-NEXT: movdqa %xmm6, %xmm1
1053 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
1054 ; SSE-NEXT: movdqa 32(%rdi), %xmm4
1055 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
1056 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
1057 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1058 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[1,1,1,1]
1059 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1060 ; SSE-NEXT: movdqa 176(%rdi), %xmm10
1061 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,3,2,3]
1062 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm10[0,0,1,1]
1063 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1]
1064 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm2[0],xmm13[1]
1065 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,3,2,3]
1066 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,1,1]
1067 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
1068 ; SSE-NEXT: movdqa 64(%rdi), %xmm14
1069 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
1070 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm14[0,0,1,1]
1071 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1]
1072 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
1073 ; SSE-NEXT: movdqa 208(%rdi), %xmm3
1074 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,0,1,1]
1075 ; SSE-NEXT: movdqa %xmm10, %xmm7
1076 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
1077 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,2,3,3]
1078 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
1079 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1]
1080 ; SSE-NEXT: movdqa 96(%rdi), %xmm5
1081 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,1,1]
1082 ; SSE-NEXT: movdqa %xmm14, %xmm15
1083 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
1084 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,3,3]
1085 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
1086 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
1087 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[3,3,3,3]
1088 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1089 ; SSE-NEXT: movdqa %xmm6, %xmm12
1090 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
1091 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm10[2,2,3,3]
1092 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm3[0],xmm9[1],xmm3[1]
1093 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm12[0],xmm9[1]
1094 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,3,3,3]
1095 ; SSE-NEXT: movdqa %xmm11, %xmm12
1096 ; SSE-NEXT: movdqa %xmm11, %xmm4
1097 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
1098 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[2,2,3,3]
1099 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
1100 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1]
1101 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,2,2,2]
1102 ; SSE-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm4[2],xmm10[3],xmm4[3]
1103 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,1,1]
1104 ; SSE-NEXT: movdqa %xmm6, %xmm11
1105 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1106 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
1107 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm4[0],xmm10[1]
1108 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,2,2,2]
1109 ; SSE-NEXT: punpckhdq {{.*#+}} xmm14 = xmm14[2],xmm4[2],xmm14[3],xmm4[3]
1110 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm12[1,1,1,1]
1111 ; SSE-NEXT: movdqa %xmm12, %xmm6
1112 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1113 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
1114 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm4[0],xmm14[1]
1115 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,1,1]
1116 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm11[2,3,2,3]
1117 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1]
1118 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
1119 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
1120 ; SSE-NEXT: # xmm4 = mem[0,0,1,1]
1121 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
1122 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm12[0],xmm4[1]
1123 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
1124 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm6[2,3,2,3]
1125 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
1126 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
1127 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
1128 ; SSE-NEXT: # xmm3 = mem[0,0,1,1]
1129 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1130 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm12[0],xmm3[1]
1131 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1132 ; SSE-NEXT: movaps %xmm1, (%rsi)
1133 ; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
1134 ; SSE-NEXT: movaps %xmm1, 16(%rsi)
1135 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1136 ; SSE-NEXT: movaps %xmm0, (%rdx)
1137 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1138 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
1139 ; SSE-NEXT: movapd %xmm8, (%rcx)
1140 ; SSE-NEXT: movapd %xmm13, 16(%rcx)
1141 ; SSE-NEXT: movapd %xmm15, (%r8)
1142 ; SSE-NEXT: movapd %xmm7, 16(%r8)
1143 ; SSE-NEXT: movapd %xmm2, (%r9)
1144 ; SSE-NEXT: movapd %xmm9, 16(%r9)
1145 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1146 ; SSE-NEXT: movapd %xmm14, (%rax)
1147 ; SSE-NEXT: movapd %xmm10, 16(%rax)
1148 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1149 ; SSE-NEXT: movapd %xmm3, (%rax)
1150 ; SSE-NEXT: movapd %xmm4, 16(%rax)
1151 ; SSE-NEXT: addq $24, %rsp
1152 ; SSE-NEXT: retq
1153 ;
1154 ; AVX-LABEL: load_i32_stride7_vf8:
1155 ; AVX: # %bb.0:
1156 ; AVX-NEXT: vmovaps 160(%rdi), %ymm4
1157 ; AVX-NEXT: vmovaps 128(%rdi), %ymm8
1158 ; AVX-NEXT: vmovaps 64(%rdi), %ymm11
1159 ; AVX-NEXT: vmovaps 32(%rdi), %ymm0
1160 ; AVX-NEXT: vmovaps (%rdi), %ymm1
1161 ; AVX-NEXT: vmovaps 96(%rdi), %ymm12
1162 ; AVX-NEXT: vmovaps 80(%rdi), %xmm2
1163 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm12[0],ymm2[0],ymm12[2],ymm2[2]
1164 ; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
1165 ; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
1166 ; AVX-NEXT: vmovaps (%rdi), %xmm13
1167 ; AVX-NEXT: vmovaps 32(%rdi), %xmm9
1168 ; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm13[0,1],xmm5[2,3]
1169 ; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,3,2,3]
1170 ; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3,4,5,6,7]
1171 ; AVX-NEXT: vmovaps 160(%rdi), %xmm5
1172 ; AVX-NEXT: vmovaps 128(%rdi), %xmm6
1173 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm6[1],xmm5[1]
1174 ; AVX-NEXT: vmovaps 192(%rdi), %xmm10
1175 ; AVX-NEXT: vinsertps {{.*#+}} xmm7 = zero,xmm7[1,2],xmm10[1]
1176 ; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
1177 ; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm7[5,6,7]
1178 ; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm12[1,1],ymm11[2,2],ymm12[5,5],ymm11[6,6]
1179 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3]
1180 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm9[0],xmm13[1],xmm9[2,3]
1181 ; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm14[1,0],mem[3,3]
1182 ; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm14[0,1,2],ymm7[3,4,5,6,7]
1183 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm8[2,3],ymm4[0,1]
1184 ; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm8[0,0],ymm14[3,3],ymm8[4,4],ymm14[7,7]
1185 ; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
1186 ; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,xmm14[1,2],xmm10[2]
1187 ; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
1188 ; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm14[5,6,7]
1189 ; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm13[2,3,2,3]
1190 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm14[0],xmm9[1],xmm14[2,3]
1191 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm2[3,1],ymm11[0,3],ymm2[7,5],ymm11[4,7]
1192 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm12[2,1],ymm15[2,0],ymm12[6,5],ymm15[6,4]
1193 ; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm14[0,1],ymm15[2,3,4,5,6,7]
1194 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm8[0],ymm4[0],ymm8[2],ymm4[2]
1195 ; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
1196 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1,2],xmm10[3]
1197 ; AVX-NEXT: vmovaps 192(%rdi), %ymm14
1198 ; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
1199 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1,2,3,4],ymm10[5,6,7]
1200 ; AVX-NEXT: vmovaps 64(%rdi), %xmm15
1201 ; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[1,0],ymm12[0,0],ymm11[5,4],ymm12[4,4]
1202 ; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm12[3,1],ymm11[0,2],ymm12[7,5],ymm11[4,6]
1203 ; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm9[0,1,2],xmm13[3]
1204 ; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[3,2,2,3]
1205 ; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3,4,5,6,7]
1206 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm14[0,1],ymm4[1,3],ymm14[4,5],ymm4[5,7]
1207 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm8[0,2],ymm12[2,0],ymm8[4,6],ymm12[6,4]
1208 ; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm12[5,6,7]
1209 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm14[1,0],ymm4[2,0],ymm14[5,4],ymm4[6,4]
1210 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm8[2,3,0,1]
1211 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[3,0],ymm13[0,0],ymm8[7,4],ymm13[4,4]
1212 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0],ymm12[2,0],ymm8[6,4],ymm12[6,4]
1213 ; AVX-NEXT: vmovaps 96(%rdi), %xmm12
1214 ; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm12[0,1,0,1]
1215 ; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm15[0,1,2],xmm13[3]
1216 ; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
1217 ; AVX-NEXT: vblendps {{.*#+}} xmm9 = mem[0],xmm9[1],mem[2,3]
1218 ; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],xmm13[2,3]
1219 ; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
1220 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm14[2,1],ymm4[3,3],ymm14[6,5],ymm4[7,7]
1221 ; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm5[0],xmm6[1],xmm5[2,3]
1222 ; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
1223 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm9[1,0],ymm4[2,0],ymm9[5,4],ymm4[6,4]
1224 ; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm12[0,1,2],xmm15[3]
1225 ; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm0[0,0],ymm1[1,0],ymm0[4,4],ymm1[5,4]
1226 ; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[2,0,2,3,6,4,6,7]
1227 ; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
1228 ; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm13[0,1],xmm9[3,2]
1229 ; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm4[4,5,6,7]
1230 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm14[2,3,0,1]
1231 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm14[3,0],ymm9[0,0],ymm14[7,4],ymm9[4,4]
1232 ; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[2,3,2,3]
1233 ; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3]
1234 ; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
1235 ; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1],ymm9[2,0],ymm5[4,5],ymm9[6,4]
1236 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,0,1]
1237 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[3]
1238 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm1[2,0],ymm0[5,4],ymm1[6,4]
1239 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
1240 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
1241 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
1242 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
1243 ; AVX-NEXT: vmovaps %ymm3, (%rsi)
1244 ; AVX-NEXT: vmovaps %ymm7, (%rdx)
1245 ; AVX-NEXT: vmovaps %ymm10, (%rcx)
1246 ; AVX-NEXT: vmovaps %ymm11, (%r8)
1247 ; AVX-NEXT: vmovaps %ymm8, (%r9)
1248 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
1249 ; AVX-NEXT: vmovaps %ymm4, (%rax)
1250 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
1251 ; AVX-NEXT: vmovaps %ymm0, (%rax)
1252 ; AVX-NEXT: vzeroupper
1253 ; AVX-NEXT: retq
1254 ;
1255 ; AVX2-LABEL: load_i32_stride7_vf8:
1256 ; AVX2: # %bb.0:
1257 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
1258 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
1259 ; AVX2-NEXT: vmovdqa 64(%rdi), %ymm9
1260 ; AVX2-NEXT: vmovdqa 160(%rdi), %ymm4
1261 ; AVX2-NEXT: vmovdqa 128(%rdi), %ymm5
1262 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
1263 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
1264 ; AVX2-NEXT: vmovdqa 96(%rdi), %ymm10
1265 ; AVX2-NEXT: vpbroadcastq 80(%rdi), %ymm2
1266 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1267 ; AVX2-NEXT: vpmovsxbd {{.*#+}} xmm3 = [0,7,6,0]
1268 ; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
1269 ; AVX2-NEXT: vpermd %ymm6, %ymm3, %ymm3
1270 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7]
1271 ; AVX2-NEXT: vmovdqa 128(%rdi), %xmm6
1272 ; AVX2-NEXT: vmovdqa 160(%rdi), %xmm3
1273 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm3[1]
1274 ; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
1275 ; AVX2-NEXT: vpbroadcastd 196(%rdi), %ymm7
1276 ; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
1277 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm6[5,6,7]
1278 ; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = mem[2,2,2,2]
1279 ; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
1280 ; AVX2-NEXT: vpalignr {{.*#+}} ymm7 = ymm4[12,13,14,15],ymm5[0,1,2,3,4,5,6,7,8,9,10,11],ymm4[28,29,30,31],ymm5[16,17,18,19,20,21,22,23,24,25,26,27]
1281 ; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,2,0]
1282 ; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5,6],ymm6[7]
1283 ; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
1284 ; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
1285 ; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6],ymm8[7]
1286 ; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,0,7,6,5,0,0,0]
1287 ; AVX2-NEXT: vpermd %ymm7, %ymm11, %ymm7
1288 ; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1289 ; AVX2-NEXT: vmovdqa 80(%rdi), %xmm7
1290 ; AVX2-NEXT: vpalignr {{.*#+}} ymm11 = ymm10[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
1291 ; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3],ymm11[4,5,6,7]
1292 ; AVX2-NEXT: vpbroadcastd 8(%rdi), %xmm11
1293 ; AVX2-NEXT: vmovdqa 32(%rdi), %xmm12
1294 ; AVX2-NEXT: vpblendd {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3]
1295 ; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0,1],ymm7[2,3,4,5,6,7]
1296 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm11 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
1297 ; AVX2-NEXT: vpbroadcastd 204(%rdi), %ymm13
1298 ; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm13[7]
1299 ; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm11[5,6,7]
1300 ; AVX2-NEXT: vpblendd {{.*#+}} xmm11 = xmm12[0,1,2],mem[3]
1301 ; AVX2-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[3,2,2,3]
1302 ; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3,4,5,6,7]
1303 ; AVX2-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[3,1,1,0,7,5,5,4]
1304 ; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5,6,7]
1305 ; AVX2-NEXT: vshufps {{.*#+}} ymm10 = ymm5[0,2],ymm4[1,3],ymm5[4,6],ymm4[5,7]
1306 ; AVX2-NEXT: vbroadcastss 208(%rdi), %ymm11
1307 ; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,6],ymm11[7]
1308 ; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5,6,7]
1309 ; AVX2-NEXT: vpbroadcastd 100(%rdi), %xmm10
1310 ; AVX2-NEXT: vmovdqa 64(%rdi), %xmm11
1311 ; AVX2-NEXT: vpblendd {{.*#+}} xmm10 = xmm11[0,1,2],xmm10[3]
1312 ; AVX2-NEXT: vpmovsxbd {{.*#+}} xmm12 = [4,3,0,0]
1313 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1314 ; AVX2-NEXT: vpermd %ymm13, %ymm12, %ymm12
1315 ; AVX2-NEXT: vpblendd {{.*#+}} xmm10 = xmm12[0,1],xmm10[2,3]
1316 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm12 = [0,7,0,7,0,7,0,7]
1317 ; AVX2-NEXT: vpermd %ymm5, %ymm12, %ymm13
1318 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm4[6,7]
1319 ; AVX2-NEXT: vpbroadcastd 212(%rdi), %ymm14
1320 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm14[7]
1321 ; AVX2-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm13[4,5,6,7]
1322 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4],ymm5[5],ymm4[6,7]
1323 ; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[1,0,3,3,5,4,7,7]
1324 ; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3]
1325 ; AVX2-NEXT: vpbroadcastd 216(%rdi), %ymm5
1326 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
1327 ; AVX2-NEXT: vmovdqa 96(%rdi), %xmm5
1328 ; AVX2-NEXT: vpblendd {{.*#+}} xmm11 = xmm5[0,1,2],xmm11[3]
1329 ; AVX2-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,2]
1330 ; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[1,0,2,3,5,4,6,7]
1331 ; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm8
1332 ; AVX2-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm11[2,3]
1333 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm4[4,5,6,7]
1334 ; AVX2-NEXT: vpermd 192(%rdi), %ymm12, %ymm8
1335 ; AVX2-NEXT: vpbroadcastd 136(%rdi), %xmm11
1336 ; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm11[0],xmm3[1],xmm11[2,3]
1337 ; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
1338 ; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm8[6,7]
1339 ; AVX2-NEXT: vpbroadcastd 80(%rdi), %ymm8
1340 ; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3]
1341 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
1342 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
1343 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
1344 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3]
1345 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
1346 ; AVX2-NEXT: vmovdqa %ymm2, (%rsi)
1347 ; AVX2-NEXT: vmovdqa %ymm6, (%rdx)
1348 ; AVX2-NEXT: vmovdqa %ymm7, (%rcx)
1349 ; AVX2-NEXT: vmovdqa %ymm9, (%r8)
1350 ; AVX2-NEXT: vmovdqa %ymm10, (%r9)
1351 ; AVX2-NEXT: vmovdqa %ymm4, (%r10)
1352 ; AVX2-NEXT: vmovdqa %ymm0, (%rax)
1353 ; AVX2-NEXT: vzeroupper
1354 ; AVX2-NEXT: retq
1355 ;
1356 ; AVX2-FP-LABEL: load_i32_stride7_vf8:
1357 ; AVX2-FP: # %bb.0:
1358 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1359 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %r10
1360 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm9
1361 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm4
1362 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm5
1363 ; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm0
1364 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm1
1365 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm10
1366 ; AVX2-FP-NEXT: vpbroadcastq 80(%rdi), %ymm2
1367 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1368 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [0,7,6,0]
1369 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
1370 ; AVX2-FP-NEXT: vpermd %ymm6, %ymm3, %ymm3
1371 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7]
1372 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %xmm6
1373 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %xmm3
1374 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm3[1]
1375 ; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
1376 ; AVX2-FP-NEXT: vpbroadcastd 196(%rdi), %ymm7
1377 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
1378 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm6[5,6,7]
1379 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm6 = mem[2,2,2,2]
1380 ; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
1381 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm7 = ymm4[12,13,14,15],ymm5[0,1,2,3,4,5,6,7,8,9,10,11],ymm4[28,29,30,31],ymm5[16,17,18,19,20,21,22,23,24,25,26,27]
1382 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,2,0]
1383 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5,6],ymm6[7]
1384 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
1385 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
1386 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6],ymm8[7]
1387 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,0,7,6,5,0,0,0]
1388 ; AVX2-FP-NEXT: vpermd %ymm7, %ymm11, %ymm7
1389 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1390 ; AVX2-FP-NEXT: vmovdqa 80(%rdi), %xmm7
1391 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm11 = ymm10[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
1392 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3],ymm11[4,5,6,7]
1393 ; AVX2-FP-NEXT: vpbroadcastd 8(%rdi), %xmm11
1394 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm12
1395 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3]
1396 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0,1],ymm7[2,3,4,5,6,7]
1397 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm11 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
1398 ; AVX2-FP-NEXT: vpbroadcastd 204(%rdi), %ymm13
1399 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm13[7]
1400 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm11[5,6,7]
1401 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm11 = xmm12[0,1,2],mem[3]
1402 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[3,2,2,3]
1403 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3,4,5,6,7]
1404 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[3,1,1,0,7,5,5,4]
1405 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5,6,7]
1406 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm10 = ymm5[0,2],ymm4[1,3],ymm5[4,6],ymm4[5,7]
1407 ; AVX2-FP-NEXT: vbroadcastss 208(%rdi), %ymm11
1408 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,6],ymm11[7]
1409 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5,6,7]
1410 ; AVX2-FP-NEXT: vpbroadcastd 100(%rdi), %xmm10
1411 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %xmm11
1412 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm10 = xmm11[0,1,2],xmm10[3]
1413 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} xmm12 = [4,3,0,0]
1414 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1415 ; AVX2-FP-NEXT: vpermd %ymm13, %ymm12, %ymm12
1416 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm10 = xmm12[0,1],xmm10[2,3]
1417 ; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm12 = [0,7,0,7,0,7,0,7]
1418 ; AVX2-FP-NEXT: vpermd %ymm5, %ymm12, %ymm13
1419 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm4[6,7]
1420 ; AVX2-FP-NEXT: vpbroadcastd 212(%rdi), %ymm14
1421 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm14[7]
1422 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm13[4,5,6,7]
1423 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4],ymm5[5],ymm4[6,7]
1424 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[1,0,3,3,5,4,7,7]
1425 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3]
1426 ; AVX2-FP-NEXT: vpbroadcastd 216(%rdi), %ymm5
1427 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
1428 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %xmm5
1429 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm11 = xmm5[0,1,2],xmm11[3]
1430 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,2]
1431 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[1,0,2,3,5,4,6,7]
1432 ; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm8
1433 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm11[2,3]
1434 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm4[4,5,6,7]
1435 ; AVX2-FP-NEXT: vpermd 192(%rdi), %ymm12, %ymm8
1436 ; AVX2-FP-NEXT: vpbroadcastd 136(%rdi), %xmm11
1437 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm3 = xmm11[0],xmm3[1],xmm11[2,3]
1438 ; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
1439 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm8[6,7]
1440 ; AVX2-FP-NEXT: vpbroadcastd 80(%rdi), %ymm8
1441 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3]
1442 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
1443 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
1444 ; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm0
1445 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3]
1446 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
1447 ; AVX2-FP-NEXT: vmovdqa %ymm2, (%rsi)
1448 ; AVX2-FP-NEXT: vmovdqa %ymm6, (%rdx)
1449 ; AVX2-FP-NEXT: vmovdqa %ymm7, (%rcx)
1450 ; AVX2-FP-NEXT: vmovdqa %ymm9, (%r8)
1451 ; AVX2-FP-NEXT: vmovdqa %ymm10, (%r9)
1452 ; AVX2-FP-NEXT: vmovdqa %ymm4, (%r10)
1453 ; AVX2-FP-NEXT: vmovdqa %ymm0, (%rax)
1454 ; AVX2-FP-NEXT: vzeroupper
1455 ; AVX2-FP-NEXT: retq
1456 ;
1457 ; AVX2-FCP-LABEL: load_i32_stride7_vf8:
1458 ; AVX2-FCP: # %bb.0:
1459 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1460 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
1461 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm9
1462 ; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm4
1463 ; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm5
1464 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm0
1465 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
1466 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm10
1467 ; AVX2-FCP-NEXT: vpbroadcastq 80(%rdi), %ymm2
1468 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1469 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} xmm3 = [0,7,6,0]
1470 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
1471 ; AVX2-FCP-NEXT: vpermd %ymm6, %ymm3, %ymm3
1472 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7]
1473 ; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm6
1474 ; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm3
1475 ; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm3[1]
1476 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
1477 ; AVX2-FCP-NEXT: vpbroadcastd 196(%rdi), %ymm7
1478 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
1479 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm6[5,6,7]
1480 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm6 = mem[2,2,2,2]
1481 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
1482 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm7 = ymm4[12,13,14,15],ymm5[0,1,2,3,4,5,6,7,8,9,10,11],ymm4[28,29,30,31],ymm5[16,17,18,19,20,21,22,23,24,25,26,27]
1483 ; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,2,0]
1484 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5,6],ymm6[7]
1485 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
1486 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
1487 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6],ymm8[7]
1488 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [1,0,7,6,5,0,0,0]
1489 ; AVX2-FCP-NEXT: vpermd %ymm7, %ymm11, %ymm7
1490 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1491 ; AVX2-FCP-NEXT: vmovdqa 80(%rdi), %xmm7
1492 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm11 = ymm10[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
1493 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3],ymm11[4,5,6,7]
1494 ; AVX2-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm11
1495 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm12
1496 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3]
1497 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0,1],ymm7[2,3,4,5,6,7]
1498 ; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm11 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
1499 ; AVX2-FCP-NEXT: vpbroadcastd 204(%rdi), %ymm13
1500 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm13[7]
1501 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm11[5,6,7]
1502 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm11 = xmm12[0,1,2],mem[3]
1503 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[3,2,2,3]
1504 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3,4,5,6,7]
1505 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[3,1,1,0,7,5,5,4]
1506 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5,6,7]
1507 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm10 = ymm5[0,2],ymm4[1,3],ymm5[4,6],ymm4[5,7]
1508 ; AVX2-FCP-NEXT: vbroadcastss 208(%rdi), %ymm11
1509 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,6],ymm11[7]
1510 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5,6,7]
1511 ; AVX2-FCP-NEXT: vpbroadcastd 100(%rdi), %xmm10
1512 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %xmm11
1513 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm10 = xmm11[0,1,2],xmm10[3]
1514 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} xmm12 = [4,3,0,0]
1515 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1516 ; AVX2-FCP-NEXT: vpermd %ymm13, %ymm12, %ymm12
1517 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm10 = xmm12[0,1],xmm10[2,3]
1518 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm12 = [0,7,0,7,0,7,0,7]
1519 ; AVX2-FCP-NEXT: vpermd %ymm5, %ymm12, %ymm13
1520 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm4[6,7]
1521 ; AVX2-FCP-NEXT: vpbroadcastd 212(%rdi), %ymm14
1522 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm14[7]
1523 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm13[4,5,6,7]
1524 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4],ymm5[5],ymm4[6,7]
1525 ; AVX2-FCP-NEXT: vpmovsxbq {{.*#+}} ymm5 = [0,0,1,7]
1526 ; AVX2-FCP-NEXT: vpermd %ymm4, %ymm5, %ymm4
1527 ; AVX2-FCP-NEXT: vpbroadcastd 216(%rdi), %ymm5
1528 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
1529 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %xmm5
1530 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm11 = xmm5[0,1,2],xmm11[3]
1531 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,2]
1532 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[1,0,2,3,5,4,6,7]
1533 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm8
1534 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm11[2,3]
1535 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm4[4,5,6,7]
1536 ; AVX2-FCP-NEXT: vpermd 192(%rdi), %ymm12, %ymm8
1537 ; AVX2-FCP-NEXT: vpbroadcastd 136(%rdi), %xmm11
1538 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm11[0],xmm3[1],xmm11[2,3]
1539 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
1540 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm8[6,7]
1541 ; AVX2-FCP-NEXT: vpbroadcastd 80(%rdi), %ymm8
1542 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3]
1543 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
1544 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
1545 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
1546 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3]
1547 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
1548 ; AVX2-FCP-NEXT: vmovdqa %ymm2, (%rsi)
1549 ; AVX2-FCP-NEXT: vmovdqa %ymm6, (%rdx)
1550 ; AVX2-FCP-NEXT: vmovdqa %ymm7, (%rcx)
1551 ; AVX2-FCP-NEXT: vmovdqa %ymm9, (%r8)
1552 ; AVX2-FCP-NEXT: vmovdqa %ymm10, (%r9)
1553 ; AVX2-FCP-NEXT: vmovdqa %ymm4, (%r10)
1554 ; AVX2-FCP-NEXT: vmovdqa %ymm0, (%rax)
1555 ; AVX2-FCP-NEXT: vzeroupper
1556 ; AVX2-FCP-NEXT: retq
1557 ;
1558 ; AVX512-LABEL: load_i32_stride7_vf8:
1559 ; AVX512: # %bb.0:
1560 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
1561 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
1562 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
1563 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
1564 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2
1565 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm3
1566 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,3,10,17]
1567 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1568 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,7,14,21,28,0,0,0]
1569 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
1570 ; AVX512-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
1571 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,4,11,18]
1572 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
1573 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,8,15,22,29,0,0,0]
1574 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
1575 ; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
1576 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,0,5,12,19]
1577 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1578 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
1579 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
1580 ; AVX512-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1581 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,0,0,0,0,6,13,20]
1582 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm7
1583 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm8 = [19,26,1,8,15,0,0,0]
1584 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
1585 ; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
1586 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,0,0,0,0,7,14,21]
1587 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1588 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm9 = [4,11,18,25]
1589 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm9
1590 ; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
1591 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,0,0,0,1,8,15,22]
1592 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1593 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm10 = [5,12,19,26]
1594 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm10
1595 ; AVX512-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1596 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,0,0,0,2,9,16,23]
1597 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1598 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm2 = [6,13,20,27]
1599 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
1600 ; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1601 ; AVX512-NEXT: vmovdqa %ymm4, (%rsi)
1602 ; AVX512-NEXT: vmovdqa %ymm5, (%rdx)
1603 ; AVX512-NEXT: vmovdqa %ymm6, (%rcx)
1604 ; AVX512-NEXT: vmovdqa %ymm7, (%r8)
1605 ; AVX512-NEXT: vmovdqa %ymm8, (%r9)
1606 ; AVX512-NEXT: vmovdqa %ymm9, (%r10)
1607 ; AVX512-NEXT: vmovdqa %ymm0, (%rax)
1608 ; AVX512-NEXT: vzeroupper
1609 ; AVX512-NEXT: retq
1610 ;
1611 ; AVX512-FCP-LABEL: load_i32_stride7_vf8:
1612 ; AVX512-FCP: # %bb.0:
1613 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1614 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
1615 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
1616 ; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
1617 ; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
1618 ; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
1619 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,3,10,17]
1620 ; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1621 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,7,14,21,28,0,0,0]
1622 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
1623 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
1624 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,4,11,18]
1625 ; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
1626 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,8,15,22,29,0,0,0]
1627 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
1628 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
1629 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,0,5,12,19]
1630 ; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1631 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
1632 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
1633 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1634 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,0,0,0,0,6,13,20]
1635 ; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm7
1636 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [19,26,1,8,15,0,0,0]
1637 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
1638 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
1639 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,0,0,0,0,7,14,21]
1640 ; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1641 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm9 = [4,11,18,25]
1642 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm9
1643 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
1644 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,0,0,0,1,8,15,22]
1645 ; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1646 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm10 = [5,12,19,26]
1647 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm10
1648 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1649 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,0,0,0,2,9,16,23]
1650 ; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1651 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [6,13,20,27]
1652 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
1653 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1654 ; AVX512-FCP-NEXT: vmovdqa %ymm4, (%rsi)
1655 ; AVX512-FCP-NEXT: vmovdqa %ymm5, (%rdx)
1656 ; AVX512-FCP-NEXT: vmovdqa %ymm6, (%rcx)
1657 ; AVX512-FCP-NEXT: vmovdqa %ymm7, (%r8)
1658 ; AVX512-FCP-NEXT: vmovdqa %ymm8, (%r9)
1659 ; AVX512-FCP-NEXT: vmovdqa %ymm9, (%r10)
1660 ; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rax)
1661 ; AVX512-FCP-NEXT: vzeroupper
1662 ; AVX512-FCP-NEXT: retq
1663 ;
1664 ; AVX512DQ-LABEL: load_i32_stride7_vf8:
1665 ; AVX512DQ: # %bb.0:
1666 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
1667 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
1668 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
1669 ; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm1
1670 ; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm2
1671 ; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %zmm3
1672 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,3,10,17]
1673 ; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1674 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,7,14,21,28,0,0,0]
1675 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
1676 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
1677 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,4,11,18]
1678 ; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
1679 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,8,15,22,29,0,0,0]
1680 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
1681 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
1682 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,0,5,12,19]
1683 ; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1684 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
1685 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
1686 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1687 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,0,0,0,0,6,13,20]
1688 ; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm7
1689 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm8 = [19,26,1,8,15,0,0,0]
1690 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
1691 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
1692 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,0,0,0,0,7,14,21]
1693 ; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1694 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm9 = [4,11,18,25]
1695 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm9
1696 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
1697 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,0,0,0,1,8,15,22]
1698 ; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1699 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm10 = [5,12,19,26]
1700 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm10
1701 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1702 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,0,0,0,2,9,16,23]
1703 ; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1704 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm2 = [6,13,20,27]
1705 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
1706 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1707 ; AVX512DQ-NEXT: vmovdqa %ymm4, (%rsi)
1708 ; AVX512DQ-NEXT: vmovdqa %ymm5, (%rdx)
1709 ; AVX512DQ-NEXT: vmovdqa %ymm6, (%rcx)
1710 ; AVX512DQ-NEXT: vmovdqa %ymm7, (%r8)
1711 ; AVX512DQ-NEXT: vmovdqa %ymm8, (%r9)
1712 ; AVX512DQ-NEXT: vmovdqa %ymm9, (%r10)
1713 ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rax)
1714 ; AVX512DQ-NEXT: vzeroupper
1715 ; AVX512DQ-NEXT: retq
1716 ;
1717 ; AVX512DQ-FCP-LABEL: load_i32_stride7_vf8:
1718 ; AVX512DQ-FCP: # %bb.0:
1719 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1720 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
1721 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
1722 ; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
1723 ; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
1724 ; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
1725 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,3,10,17]
1726 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1727 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,7,14,21,28,0,0,0]
1728 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
1729 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
1730 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,4,11,18]
1731 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
1732 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,8,15,22,29,0,0,0]
1733 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
1734 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
1735 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,0,5,12,19]
1736 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1737 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
1738 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
1739 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1740 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,0,0,0,0,6,13,20]
1741 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm7
1742 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [19,26,1,8,15,0,0,0]
1743 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
1744 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
1745 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,0,0,0,0,7,14,21]
1746 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1747 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm9 = [4,11,18,25]
1748 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm9
1749 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
1750 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,0,0,0,1,8,15,22]
1751 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1752 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm10 = [5,12,19,26]
1753 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm10
1754 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1755 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,0,0,0,2,9,16,23]
1756 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1757 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [6,13,20,27]
1758 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
1759 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1760 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, (%rsi)
1761 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, (%rdx)
1762 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, (%rcx)
1763 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, (%r8)
1764 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, (%r9)
1765 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, (%r10)
1766 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rax)
1767 ; AVX512DQ-FCP-NEXT: vzeroupper
1768 ; AVX512DQ-FCP-NEXT: retq
1769 ;
1770 ; AVX512BW-LABEL: load_i32_stride7_vf8:
1771 ; AVX512BW: # %bb.0:
1772 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1773 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
1774 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
1775 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
1776 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm2
1777 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm3
1778 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,3,10,17]
1779 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1780 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,7,14,21,28,0,0,0]
1781 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
1782 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
1783 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,4,11,18]
1784 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
1785 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,8,15,22,29,0,0,0]
1786 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
1787 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
1788 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,0,5,12,19]
1789 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1790 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
1791 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
1792 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1793 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,0,0,0,0,6,13,20]
1794 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm7
1795 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [19,26,1,8,15,0,0,0]
1796 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
1797 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
1798 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,0,0,0,0,7,14,21]
1799 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1800 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm9 = [4,11,18,25]
1801 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm9
1802 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
1803 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,0,0,0,1,8,15,22]
1804 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1805 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm10 = [5,12,19,26]
1806 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm10
1807 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1808 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,0,0,0,2,9,16,23]
1809 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1810 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [6,13,20,27]
1811 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
1812 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1813 ; AVX512BW-NEXT: vmovdqa %ymm4, (%rsi)
1814 ; AVX512BW-NEXT: vmovdqa %ymm5, (%rdx)
1815 ; AVX512BW-NEXT: vmovdqa %ymm6, (%rcx)
1816 ; AVX512BW-NEXT: vmovdqa %ymm7, (%r8)
1817 ; AVX512BW-NEXT: vmovdqa %ymm8, (%r9)
1818 ; AVX512BW-NEXT: vmovdqa %ymm9, (%r10)
1819 ; AVX512BW-NEXT: vmovdqa %ymm0, (%rax)
1820 ; AVX512BW-NEXT: vzeroupper
1821 ; AVX512BW-NEXT: retq
1822 ;
1823 ; AVX512BW-FCP-LABEL: load_i32_stride7_vf8:
1824 ; AVX512BW-FCP: # %bb.0:
1825 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1826 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
1827 ; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
1828 ; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
1829 ; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
1830 ; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
1831 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,3,10,17]
1832 ; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1833 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,7,14,21,28,0,0,0]
1834 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
1835 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
1836 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,4,11,18]
1837 ; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
1838 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,8,15,22,29,0,0,0]
1839 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
1840 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
1841 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,0,5,12,19]
1842 ; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1843 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
1844 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
1845 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1846 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,0,0,0,0,6,13,20]
1847 ; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm7
1848 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [19,26,1,8,15,0,0,0]
1849 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
1850 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
1851 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,0,0,0,0,7,14,21]
1852 ; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1853 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm9 = [4,11,18,25]
1854 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm9
1855 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
1856 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,0,0,0,1,8,15,22]
1857 ; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1858 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm10 = [5,12,19,26]
1859 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm10
1860 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1861 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,0,0,0,2,9,16,23]
1862 ; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1863 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [6,13,20,27]
1864 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
1865 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1866 ; AVX512BW-FCP-NEXT: vmovdqa %ymm4, (%rsi)
1867 ; AVX512BW-FCP-NEXT: vmovdqa %ymm5, (%rdx)
1868 ; AVX512BW-FCP-NEXT: vmovdqa %ymm6, (%rcx)
1869 ; AVX512BW-FCP-NEXT: vmovdqa %ymm7, (%r8)
1870 ; AVX512BW-FCP-NEXT: vmovdqa %ymm8, (%r9)
1871 ; AVX512BW-FCP-NEXT: vmovdqa %ymm9, (%r10)
1872 ; AVX512BW-FCP-NEXT: vmovdqa %ymm0, (%rax)
1873 ; AVX512BW-FCP-NEXT: vzeroupper
1874 ; AVX512BW-FCP-NEXT: retq
1875 ;
1876 ; AVX512DQ-BW-LABEL: load_i32_stride7_vf8:
1877 ; AVX512DQ-BW: # %bb.0:
1878 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1879 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
1880 ; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
1881 ; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm1
1882 ; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm2
1883 ; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %zmm3
1884 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,3,10,17]
1885 ; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1886 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,7,14,21,28,0,0,0]
1887 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
1888 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
1889 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,4,11,18]
1890 ; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
1891 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,8,15,22,29,0,0,0]
1892 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
1893 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
1894 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,0,5,12,19]
1895 ; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1896 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
1897 ; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
1898 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1899 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,0,0,0,0,6,13,20]
1900 ; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm7
1901 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [19,26,1,8,15,0,0,0]
1902 ; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
1903 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
1904 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,0,0,0,0,7,14,21]
1905 ; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1906 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm9 = [4,11,18,25]
1907 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm9
1908 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
1909 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,0,0,0,1,8,15,22]
1910 ; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1911 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm10 = [5,12,19,26]
1912 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm10
1913 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1914 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,0,0,0,2,9,16,23]
1915 ; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1916 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm2 = [6,13,20,27]
1917 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
1918 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1919 ; AVX512DQ-BW-NEXT: vmovdqa %ymm4, (%rsi)
1920 ; AVX512DQ-BW-NEXT: vmovdqa %ymm5, (%rdx)
1921 ; AVX512DQ-BW-NEXT: vmovdqa %ymm6, (%rcx)
1922 ; AVX512DQ-BW-NEXT: vmovdqa %ymm7, (%r8)
1923 ; AVX512DQ-BW-NEXT: vmovdqa %ymm8, (%r9)
1924 ; AVX512DQ-BW-NEXT: vmovdqa %ymm9, (%r10)
1925 ; AVX512DQ-BW-NEXT: vmovdqa %ymm0, (%rax)
1926 ; AVX512DQ-BW-NEXT: vzeroupper
1927 ; AVX512DQ-BW-NEXT: retq
1928 ;
1929 ; AVX512DQ-BW-FCP-LABEL: load_i32_stride7_vf8:
1930 ; AVX512DQ-BW-FCP: # %bb.0:
1931 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1932 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
1933 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
1934 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
1935 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
1936 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
1937 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,3,10,17]
1938 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1939 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,7,14,21,28,0,0,0]
1940 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
1941 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
1942 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,0,0,0,0,4,11,18]
1943 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
1944 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [1,8,15,22,29,0,0,0]
1945 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
1946 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
1947 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm6 = [0,0,0,0,0,5,12,19]
1948 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1949 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
1950 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
1951 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
1952 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,0,0,0,0,6,13,20]
1953 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm7
1954 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [19,26,1,8,15,0,0,0]
1955 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
1956 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5,6,7]
1957 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [0,0,0,0,0,7,14,21]
1958 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1959 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm9 = [4,11,18,25]
1960 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm9
1961 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
1962 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [0,0,0,0,1,8,15,22]
1963 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1964 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm10 = [5,12,19,26]
1965 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm10
1966 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1967 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm10 = [0,0,0,0,2,9,16,23]
1968 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1969 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [6,13,20,27]
1970 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
1971 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
1972 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm4, (%rsi)
1973 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, (%rdx)
1974 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm6, (%rcx)
1975 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm7, (%r8)
1976 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm8, (%r9)
1977 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm9, (%r10)
1978 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, (%rax)
1979 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
1980 ; AVX512DQ-BW-FCP-NEXT: retq
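; The IR below is the pattern all of the assembly above must implement: one
; <56 x i32> block is loaded and deinterleaved into seven stride-7 results,
; with %strided.vecK taking elements 7*i+K of %wide.vec (K = 0..6, i = 0..7).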
1981 %wide.vec = load <56 x i32>, ptr %in.vec, align 64
1982 %strided.vec0 = shufflevector <56 x i32> %wide.vec, <56 x i32> poison, <8 x i32> <i32 0, i32 7, i32 14, i32 21, i32 28, i32 35, i32 42, i32 49>
1983 %strided.vec1 = shufflevector <56 x i32> %wide.vec, <56 x i32> poison, <8 x i32> <i32 1, i32 8, i32 15, i32 22, i32 29, i32 36, i32 43, i32 50>
1984 %strided.vec2 = shufflevector <56 x i32> %wide.vec, <56 x i32> poison, <8 x i32> <i32 2, i32 9, i32 16, i32 23, i32 30, i32 37, i32 44, i32 51>
1985 %strided.vec3 = shufflevector <56 x i32> %wide.vec, <56 x i32> poison, <8 x i32> <i32 3, i32 10, i32 17, i32 24, i32 31, i32 38, i32 45, i32 52>
1986 %strided.vec4 = shufflevector <56 x i32> %wide.vec, <56 x i32> poison, <8 x i32> <i32 4, i32 11, i32 18, i32 25, i32 32, i32 39, i32 46, i32 53>
1987 %strided.vec5 = shufflevector <56 x i32> %wide.vec, <56 x i32> poison, <8 x i32> <i32 5, i32 12, i32 19, i32 26, i32 33, i32 40, i32 47, i32 54>
1988 %strided.vec6 = shufflevector <56 x i32> %wide.vec, <56 x i32> poison, <8 x i32> <i32 6, i32 13, i32 20, i32 27, i32 34, i32 41, i32 48, i32 55>
1989 store <8 x i32> %strided.vec0, ptr %out.vec0, align 64
1990 store <8 x i32> %strided.vec1, ptr %out.vec1, align 64
1991 store <8 x i32> %strided.vec2, ptr %out.vec2, align 64
1992 store <8 x i32> %strided.vec3, ptr %out.vec3, align 64
1993 store <8 x i32> %strided.vec4, ptr %out.vec4, align 64
1994 store <8 x i32> %strided.vec5, ptr %out.vec5, align 64
1995 store <8 x i32> %strided.vec6, ptr %out.vec6, align 64
1996 ret void
1997 }
1999 define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6) nounwind {
2000 ; SSE-LABEL: load_i32_stride7_vf16:
2001 ; SSE: # %bb.0:
2002 ; SSE-NEXT: subq $440, %rsp # imm = 0x1B8
2003 ; SSE-NEXT: movdqa 304(%rdi), %xmm3
2004 ; SSE-NEXT: movdqa 272(%rdi), %xmm5
2005 ; SSE-NEXT: movdqa 224(%rdi), %xmm15
2006 ; SSE-NEXT: movdqa 240(%rdi), %xmm6
2007 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2008 ; SSE-NEXT: movdqa 80(%rdi), %xmm7
2009 ; SSE-NEXT: movdqa (%rdi), %xmm2
2010 ; SSE-NEXT: movdqa 16(%rdi), %xmm8
2011 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2012 ; SSE-NEXT: movdqa 48(%rdi), %xmm9
2013 ; SSE-NEXT: movdqa 192(%rdi), %xmm14
2014 ; SSE-NEXT: movdqa 160(%rdi), %xmm12
2015 ; SSE-NEXT: movdqa 112(%rdi), %xmm4
2016 ; SSE-NEXT: movdqa 128(%rdi), %xmm0
2017 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2018 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
2019 ; SSE-NEXT: movdqa %xmm4, %xmm1
2020 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2021 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,3,3]
2022 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2023 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
2024 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2025 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2026 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[3,3,3,3]
2027 ; SSE-NEXT: movdqa %xmm2, %xmm1
2028 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2029 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,3,3]
2030 ; SSE-NEXT: movdqa %xmm9, %xmm11
2031 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2032 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
2033 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2034 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2035 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,3,3,3]
2036 ; SSE-NEXT: movdqa %xmm15, %xmm1
2037 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2038 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
2039 ; SSE-NEXT: movdqa %xmm5, %xmm9
2040 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2041 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
2042 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2043 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2044 ; SSE-NEXT: movdqa 336(%rdi), %xmm1
2045 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2046 ; SSE-NEXT: movdqa 352(%rdi), %xmm0
2047 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2048 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
2049 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2050 ; SSE-NEXT: movdqa 416(%rdi), %xmm8
2051 ; SSE-NEXT: movdqa 384(%rdi), %xmm13
2052 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
2053 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2054 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
2055 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2056 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2057 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,2,2]
2058 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2059 ; SSE-NEXT: movdqa %xmm12, %xmm1
2060 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2061 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
2062 ; SSE-NEXT: movdqa %xmm4, %xmm5
2063 ; SSE-NEXT: movdqa 144(%rdi), %xmm4
2064 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
2065 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2066 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2067 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2068 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,2,2]
2069 ; SSE-NEXT: movdqa %xmm7, %xmm12
2070 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2071 ; SSE-NEXT: movdqa %xmm11, %xmm1
2072 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2073 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
2074 ; SSE-NEXT: movdqa %xmm2, %xmm10
2075 ; SSE-NEXT: movdqa 32(%rdi), %xmm7
2076 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
2077 ; SSE-NEXT: movdqa %xmm7, (%rsp) # 16-byte Spill
2078 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2079 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2080 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2081 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,2,2]
2082 ; SSE-NEXT: movdqa %xmm9, %xmm1
2083 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2084 ; SSE-NEXT: movdqa %xmm15, %xmm11
2085 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2086 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[1,1,1,1]
2087 ; SSE-NEXT: movdqa 256(%rdi), %xmm15
2088 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
2089 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2090 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2091 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,2,2,2]
2092 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2093 ; SSE-NEXT: movdqa %xmm13, %xmm1
2094 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2095 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2096 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
2097 ; SSE-NEXT: movdqa 368(%rdi), %xmm2
2098 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2099 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2100 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2101 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
2102 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
2103 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2104 ; SSE-NEXT: movdqa 176(%rdi), %xmm4
2105 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,3,2,3]
2106 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm4[0,0,1,1]
2107 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
2108 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
2109 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2110 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,2,3]
2111 ; SSE-NEXT: movdqa %xmm10, %xmm14
2112 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,1,1]
2113 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2114 ; SSE-NEXT: movdqa 64(%rdi), %xmm9
2115 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,3,2,3]
2116 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,0,1,1]
2117 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
2118 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm0[0],xmm6[1]
2119 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2120 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,3,2,3]
2121 ; SSE-NEXT: movdqa %xmm15, %xmm12
2122 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[1,1,1,1]
2123 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2124 ; SSE-NEXT: movdqa 288(%rdi), %xmm15
2125 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
2126 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[0,0,1,1]
2127 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
2128 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2129 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2130 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm13[2,3,2,3]
2131 ; SSE-NEXT: movdqa %xmm13, %xmm1
2132 ; SSE-NEXT: movdqa %xmm2, %xmm11
2133 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
2134 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
2135 ; SSE-NEXT: movdqa 400(%rdi), %xmm13
2136 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,3,2,3]
2137 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,1,1]
2138 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
2139 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
2140 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2141 ; SSE-NEXT: movdqa 208(%rdi), %xmm10
2142 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm10[0,0,1,1]
2143 ; SSE-NEXT: movdqa %xmm4, %xmm2
2144 ; SSE-NEXT: movdqa %xmm4, %xmm0
2145 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
2146 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,2,3,3]
2147 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2148 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
2149 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
2150 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2151 ; SSE-NEXT: movdqa 96(%rdi), %xmm0
2152 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2153 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,1,1]
2154 ; SSE-NEXT: movdqa %xmm9, %xmm3
2155 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2156 ; SSE-NEXT: movdqa %xmm9, %xmm0
2157 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
2158 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm14[2,2,3,3]
2159 ; SSE-NEXT: movdqa (%rsp), %xmm6 # 16-byte Reload
2160 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm6[2],xmm4[3],xmm6[3]
2161 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
2162 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2163 ; SSE-NEXT: movdqa 320(%rdi), %xmm7
2164 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,0,1,1]
2165 ; SSE-NEXT: movdqa %xmm15, %xmm0
2166 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
2167 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2168 ; SSE-NEXT: # xmm4 = mem[2,2,3,3]
2169 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm12[2],xmm4[3],xmm12[3]
2170 ; SSE-NEXT: movdqa %xmm12, %xmm14
2171 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
2172 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2173 ; SSE-NEXT: movdqa 432(%rdi), %xmm0
2174 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2175 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
2176 ; SSE-NEXT: movdqa %xmm13, %xmm4
2177 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
2178 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
2179 ; SSE-NEXT: movdqa %xmm11, %xmm9
2180 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm11[2],xmm0[3],xmm11[3]
2181 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
2182 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2183 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,3,3,3]
2184 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2185 ; SSE-NEXT: movdqa %xmm8, %xmm5
2186 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2187 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
2188 ; SSE-NEXT: movdqa %xmm2, %xmm1
2189 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
2190 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
2191 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2192 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[3,3,3,3]
2193 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2194 ; SSE-NEXT: movdqa %xmm4, %xmm0
2195 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
2196 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm3[2,2,3,3]
2197 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2198 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1]
2199 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm0[0],xmm12[1]
2200 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[3,3,3,3]
2201 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2202 ; SSE-NEXT: movdqa %xmm6, %xmm5
2203 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2204 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm15[2,2,3,3]
2205 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm7[0],xmm14[1],xmm7[1]
2206 ; SSE-NEXT: movdqa %xmm7, %xmm11
2207 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2208 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm5[0],xmm14[1]
2209 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[3,3,3,3]
2210 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2211 ; SSE-NEXT: movdqa %xmm7, %xmm5
2212 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2213 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm13[2,2,3,3]
2214 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2215 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm3[0],xmm9[1],xmm3[1]
2216 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm5[0],xmm9[1]
2217 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,2,2,2]
2218 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2219 ; SSE-NEXT: movdqa %xmm8, %xmm5
2220 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
2221 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2222 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
2223 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2224 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2225 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,2,2]
2226 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2227 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2228 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
2229 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2230 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
2231 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2232 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2233 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,2,2]
2234 ; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
2235 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
2236 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2237 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2238 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
2239 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,2,2]
2240 ; SSE-NEXT: movdqa %xmm3, %xmm11
2241 ; SSE-NEXT: punpckhdq {{.*#+}} xmm13 = xmm13[2],xmm0[2],xmm13[3],xmm0[3]
2242 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
2243 ; SSE-NEXT: movdqa %xmm7, %xmm2
2244 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2245 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
2246 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
2247 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
2248 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
2249 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2250 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,2,3]
2251 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
2252 ; SSE-NEXT: # xmm10 = mem[0,0,1,1]
2253 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
2254 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm5[0],xmm10[1]
2255 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
2256 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
2257 ; SSE-NEXT: # xmm5 = mem[2,3,2,3]
2258 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2259 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2260 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
2261 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
2262 ; SSE-NEXT: # xmm7 = mem[0,0,1,1]
2263 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
2264 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm5[0],xmm7[1]
2265 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
2266 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
2267 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2268 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2269 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
2270 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
2271 ; SSE-NEXT: # xmm6 = mem[0,0,1,1]
2272 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
2273 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm5[0],xmm6[1]
2274 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
2275 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
2276 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2277 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,3,2,3]
2278 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2279 ; SSE-NEXT: # xmm4 = mem[0,0,1,1]
2280 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
2281 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm5[0],xmm4[1]
2282 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2283 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
2284 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2285 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
2286 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2287 ; SSE-NEXT: movaps %xmm0, (%rsi)
2288 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2289 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
2290 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2291 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
2292 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2293 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
2294 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2295 ; SSE-NEXT: movaps %xmm0, (%rdx)
2296 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2297 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
2298 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2299 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
2300 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2301 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
2302 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2303 ; SSE-NEXT: movaps %xmm0, (%rcx)
2304 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2305 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
2306 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2307 ; SSE-NEXT: movaps %xmm0, 48(%r8)
2308 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2309 ; SSE-NEXT: movaps %xmm0, 32(%r8)
2310 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2311 ; SSE-NEXT: movaps %xmm0, (%r8)
2312 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2313 ; SSE-NEXT: movaps %xmm0, 16(%r8)
2314 ; SSE-NEXT: movapd %xmm9, 48(%r9)
2315 ; SSE-NEXT: movapd %xmm14, 32(%r9)
2316 ; SSE-NEXT: movapd %xmm12, (%r9)
2317 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2318 ; SSE-NEXT: movaps %xmm0, 16(%r9)
2319 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2320 ; SSE-NEXT: movapd %xmm13, 48(%rax)
2321 ; SSE-NEXT: movapd %xmm15, 32(%rax)
2322 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2323 ; SSE-NEXT: movaps %xmm0, (%rax)
2324 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2325 ; SSE-NEXT: movaps %xmm0, 16(%rax)
2326 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2327 ; SSE-NEXT: movapd %xmm4, 48(%rax)
2328 ; SSE-NEXT: movapd %xmm6, 32(%rax)
2329 ; SSE-NEXT: movapd %xmm7, (%rax)
2330 ; SSE-NEXT: movapd %xmm10, 16(%rax)
2331 ; SSE-NEXT: addq $440, %rsp # imm = 0x1B8
2332 ; SSE-NEXT: retq
2333 ;
2334 ; AVX-LABEL: load_i32_stride7_vf16:
2335 ; AVX: # %bb.0:
2336 ; AVX-NEXT: subq $456, %rsp # imm = 0x1C8
2337 ; AVX-NEXT: vmovaps 256(%rdi), %ymm4
2338 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2339 ; AVX-NEXT: vmovaps 224(%rdi), %ymm5
2340 ; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2341 ; AVX-NEXT: vmovaps 320(%rdi), %ymm15
2342 ; AVX-NEXT: vmovaps 32(%rdi), %ymm2
2343 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2344 ; AVX-NEXT: vmovaps (%rdi), %ymm1
2345 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2346 ; AVX-NEXT: vmovaps 96(%rdi), %ymm7
2347 ; AVX-NEXT: vmovaps 80(%rdi), %xmm0
2348 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2349 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[2],ymm0[2]
2350 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
2351 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
2352 ; AVX-NEXT: vmovaps (%rdi), %xmm13
2353 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1],xmm1[2,3]
2354 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
2355 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2356 ; AVX-NEXT: vmovaps 160(%rdi), %xmm2
2357 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2358 ; AVX-NEXT: vmovaps 128(%rdi), %xmm1
2359 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2360 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
2361 ; AVX-NEXT: vmovaps 192(%rdi), %xmm12
2362 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm12[1]
2363 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2364 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2365 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2366 ; AVX-NEXT: vmovaps 304(%rdi), %xmm0
2367 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2368 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm15[0],ymm0[0],ymm15[2],ymm0[2]
2369 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm4[6],ymm5[7]
2370 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
2371 ; AVX-NEXT: vmovaps 224(%rdi), %xmm10
2372 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3]
2373 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
2374 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2375 ; AVX-NEXT: vmovaps 384(%rdi), %xmm2
2376 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2377 ; AVX-NEXT: vmovaps 352(%rdi), %xmm1
2378 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2379 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
2380 ; AVX-NEXT: vmovaps 416(%rdi), %xmm8
2381 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm8[1]
2382 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2383 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2384 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2385 ; AVX-NEXT: vmovaps 64(%rdi), %ymm5
2386 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1],ymm5[2,2],ymm7[5,5],ymm5[6,6]
2387 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2388 ; AVX-NEXT: vmovaps 32(%rdi), %xmm11
2389 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm11[0],xmm13[1],xmm11[2,3]
2390 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
2391 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2392 ; AVX-NEXT: vmovaps 160(%rdi), %ymm6
2393 ; AVX-NEXT: vmovaps 128(%rdi), %ymm1
2394 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm6[0,1]
2395 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm1[0,0],ymm3[3,3],ymm1[4,4],ymm3[7,7]
2396 ; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
2397 ; AVX-NEXT: vinsertps {{.*#+}} xmm3 = zero,xmm3[1,2],xmm12[2]
2398 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
2399 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
2400 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2401 ; AVX-NEXT: vmovaps 288(%rdi), %ymm3
2402 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,1],ymm3[2,2],ymm15[5,5],ymm3[6,6]
2403 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2404 ; AVX-NEXT: vmovaps 256(%rdi), %xmm9
2405 ; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm9[0],xmm10[1],xmm9[2,3]
2406 ; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,0],mem[3,3]
2407 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2],ymm0[3,4,5,6,7]
2408 ; AVX-NEXT: vmovaps 384(%rdi), %ymm4
2409 ; AVX-NEXT: vmovaps 352(%rdi), %ymm0
2410 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm0[2,3],ymm4[0,1]
2411 ; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[0,0],ymm14[3,3],ymm0[4,4],ymm14[7,7]
2412 ; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
2413 ; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,xmm14[1,2],xmm8[2]
2414 ; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
2415 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2416 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2417 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm13[2,3,2,3]
2418 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm11[1],xmm2[2,3]
2419 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2420 ; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[3,1],ymm5[0,3],ymm14[7,5],ymm5[4,7]
2421 ; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm7[2,1],ymm14[2,0],ymm7[6,5],ymm14[6,4]
2422 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm14[2,3,4,5,6,7]
2423 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm1[0],ymm6[0],ymm1[2],ymm6[2]
2424 ; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
2425 ; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm14[0,1,2],xmm12[3]
2426 ; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
2427 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm12[5,6,7]
2428 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2429 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm10[2,3,2,3]
2430 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm9[1],xmm2[2,3]
2431 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
2432 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[3,1],ymm3[0,3],ymm12[7,5],ymm3[4,7]
2433 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm15[2,1],ymm12[2,0],ymm15[6,5],ymm12[6,4]
2434 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm12[2,3,4,5,6,7]
2435 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
2436 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
2437 ; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm12[0,1,2],xmm8[3]
2438 ; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
2439 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm8[5,6,7]
2440 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2441 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm5[1,0],ymm7[0,0],ymm5[5,4],ymm7[4,4]
2442 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm7[3,1],ymm2[0,2],ymm7[7,5],ymm2[4,6]
2443 ; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm11[0,1,2],xmm13[3]
2444 ; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,2,2,3]
2445 ; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm2[2,3,4,5,6,7]
2446 ; AVX-NEXT: vmovaps 192(%rdi), %ymm2
2447 ; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm2[0,1],ymm6[1,3],ymm2[4,5],ymm6[5,7]
2448 ; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[0,2],ymm7[2,0],ymm1[4,6],ymm7[6,4]
2449 ; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm7[5,6,7]
2450 ; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2451 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[1,0],ymm15[0,0],ymm3[5,4],ymm15[4,4]
2452 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm15[3,1],ymm3[0,2],ymm15[7,5],ymm3[4,6]
2453 ; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm9[0,1,2],xmm10[3]
2454 ; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,2,2,3]
2455 ; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5,6,7]
2456 ; AVX-NEXT: vmovaps 416(%rdi), %ymm5
2457 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm5[0,1],ymm4[1,3],ymm5[4,5],ymm4[5,7]
2458 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm0[0,2],ymm8[2,0],ymm0[4,6],ymm8[6,4]
2459 ; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm8[5,6,7]
2460 ; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2461 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
2462 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm3[0,0],ymm1[7,4],ymm3[4,4]
2463 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm2[1,0],ymm6[2,0],ymm2[5,4],ymm6[6,4]
2464 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm3[2,0],ymm1[6,4],ymm3[6,4]
2465 ; AVX-NEXT: vmovaps 64(%rdi), %xmm3
2466 ; AVX-NEXT: vmovaps 96(%rdi), %xmm10
2467 ; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm10[0,1,0,1]
2468 ; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm3[0,1,2],xmm8[3]
2469 ; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[2,3,2,3]
2470 ; AVX-NEXT: vblendps {{.*#+}} xmm11 = mem[0],xmm11[1],mem[2,3]
2471 ; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm11[0,1],xmm8[2,3]
2472 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm1[4,5,6,7]
2473 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2474 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm0[2,3,0,1]
2475 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm8[0,0],ymm0[7,4],ymm8[4,4]
2476 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm5[1,0],ymm4[2,0],ymm5[5,4],ymm4[6,4]
2477 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm8[2,0],ymm0[6,4],ymm8[6,4]
2478 ; AVX-NEXT: vmovaps 320(%rdi), %xmm8
2479 ; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm8[0,1,0,1]
2480 ; AVX-NEXT: vmovaps 288(%rdi), %xmm12
2481 ; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm12[0,1,2],xmm11[3]
2482 ; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
2483 ; AVX-NEXT: vblendps {{.*#+}} xmm9 = mem[0],xmm9[1],mem[2,3]
2484 ; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],xmm11[2,3]
2485 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
2486 ; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
2487 ; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm2[2,1],ymm6[3,3],ymm2[6,5],ymm6[7,7]
2488 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2489 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2490 ; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm1[0],xmm0[1],xmm1[2,3]
2491 ; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
2492 ; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm9[1,0],ymm6[2,0],ymm9[5,4],ymm6[6,4]
2493 ; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm10[0,1,2],xmm3[3]
2494 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
2495 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2496 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm14[0,0],ymm13[1,0],ymm14[4,4],ymm13[5,4]
2497 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,0,2,3,6,4,6,7]
2498 ; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
2499 ; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm9[0,1],xmm3[3,2]
2500 ; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm3[0,1,2,3],ymm6[4,5,6,7]
2501 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[2,1],ymm4[3,3],ymm5[6,5],ymm4[7,7]
2502 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2503 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2504 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0],xmm3[1],xmm7[2,3]
2505 ; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
2506 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm6[1,0],ymm4[2,0],ymm6[5,4],ymm4[6,4]
2507 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm8[0,1,2],xmm12[3]
2508 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2509 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
2510 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm11[0,0],ymm12[1,0],ymm11[4,4],ymm12[5,4]
2511 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,0,2,3,6,4,6,7]
2512 ; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
2513 ; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm9[0,1],xmm6[3,2]
2514 ; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
2515 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm2[2,3,0,1]
2516 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[3,0],ymm6[0,0],ymm2[7,4],ymm6[4,4]
2517 ; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm0[2,3,2,3]
2518 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm1[1],xmm6[2,3]
2519 ; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
2520 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,0],ymm6[4,5],ymm2[6,4]
2521 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
2522 ; AVX-NEXT: # xmm6 = mem[0,1,0,1]
2523 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm10[3]
2524 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm14[1,0],ymm13[2,0],ymm14[5,4],ymm13[6,4]
2525 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,0,2,3,6,4,6,7]
2526 ; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
2527 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1],xmm6[2,3]
2528 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
2529 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm5[2,3,0,1]
2530 ; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[3,0],ymm6[0,0],ymm5[7,4],ymm6[4,4]
2531 ; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm3[2,3,2,3]
2532 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3]
2533 ; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
2534 ; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,0],ymm6[4,5],ymm5[6,4]
2535 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
2536 ; AVX-NEXT: # xmm6 = mem[0,1,0,1]
2537 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm8[3]
2538 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm11[1,0],ymm12[2,0],ymm11[5,4],ymm12[6,4]
2539 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0,2,3,6,4,6,7]
2540 ; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
2541 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
2542 ; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
2543 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2544 ; AVX-NEXT: vmovaps %ymm6, 32(%rsi)
2545 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2546 ; AVX-NEXT: vmovaps %ymm6, (%rsi)
2547 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2548 ; AVX-NEXT: vmovaps %ymm0, 32(%rdx)
2549 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2550 ; AVX-NEXT: vmovaps %ymm0, (%rdx)
2551 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2552 ; AVX-NEXT: vmovaps %ymm0, 32(%rcx)
2553 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2554 ; AVX-NEXT: vmovaps %ymm0, (%rcx)
2555 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2556 ; AVX-NEXT: vmovaps %ymm0, 32(%r8)
2557 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2558 ; AVX-NEXT: vmovaps %ymm0, (%r8)
2559 ; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
2560 ; AVX-NEXT: vmovaps %ymm0, 32(%r9)
2561 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2562 ; AVX-NEXT: vmovaps %ymm0, (%r9)
2563 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
2564 ; AVX-NEXT: vmovaps %ymm4, 32(%rax)
2565 ; AVX-NEXT: vmovaps %ymm15, (%rax)
2566 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
2567 ; AVX-NEXT: vmovaps %ymm5, 32(%rax)
2568 ; AVX-NEXT: vmovaps %ymm2, (%rax)
2569 ; AVX-NEXT: addq $456, %rsp # imm = 0x1C8
2570 ; AVX-NEXT: vzeroupper
2571 ; AVX-NEXT: retq
2572 ;
2573 ; AVX2-LABEL: load_i32_stride7_vf16:
2574 ; AVX2: # %bb.0:
2575 ; AVX2-NEXT: subq $264, %rsp # imm = 0x108
2576 ; AVX2-NEXT: vmovdqa 288(%rdi), %ymm5
2577 ; AVX2-NEXT: vmovdqa 384(%rdi), %ymm9
2578 ; AVX2-NEXT: vmovdqa 352(%rdi), %ymm7
2579 ; AVX2-NEXT: vmovdqa 320(%rdi), %ymm4
2580 ; AVX2-NEXT: vmovdqa 256(%rdi), %ymm0
2581 ; AVX2-NEXT: vmovdqa 224(%rdi), %ymm3
2582 ; AVX2-NEXT: vmovdqa (%rdi), %ymm10
2583 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm6
2584 ; AVX2-NEXT: vmovdqa 96(%rdi), %ymm15
2585 ; AVX2-NEXT: vpbroadcastq 80(%rdi), %ymm1
2586 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
2587 ; AVX2-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,6,0]
2588 ; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3,4,5],ymm6[6],ymm10[7]
2589 ; AVX2-NEXT: vpermd %ymm8, %ymm2, %ymm8
2590 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2],ymm1[3,4,5,6,7]
2591 ; AVX2-NEXT: vmovdqa 128(%rdi), %xmm8
2592 ; AVX2-NEXT: vmovdqa 160(%rdi), %xmm11
2593 ; AVX2-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2594 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm8[1],xmm11[1]
2595 ; AVX2-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
2596 ; AVX2-NEXT: vpbroadcastd 196(%rdi), %ymm11
2597 ; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm11[7]
2598 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm8[5,6,7]
2599 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2600 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2601 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2602 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm0[6],ymm3[7]
2603 ; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm1
2604 ; AVX2-NEXT: vpbroadcastq 304(%rdi), %ymm2
2605 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
2606 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
2607 ; AVX2-NEXT: vmovdqa 352(%rdi), %xmm2
2608 ; AVX2-NEXT: vmovdqa 384(%rdi), %xmm8
2609 ; AVX2-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2610 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm8[1]
2611 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
2612 ; AVX2-NEXT: vpbroadcastd 420(%rdi), %ymm8
2613 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm8[7]
2614 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
2615 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2616 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
2617 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
2618 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm9[12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10,11],ymm9[28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26,27]
2619 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
2620 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
2621 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm5[2,3],ymm4[4,5],ymm5[6,7]
2622 ; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
2623 ; AVX2-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2624 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3,4],ymm2[5,6],ymm8[7]
2625 ; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm12 = [1,0,7,6,5,6,5,6]
2626 ; AVX2-NEXT: vpermd %ymm2, %ymm12, %ymm2
2627 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm1[5,6,7]
2628 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2629 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
2630 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm8
2631 ; AVX2-NEXT: vmovdqa 160(%rdi), %ymm3
2632 ; AVX2-NEXT: vmovdqa 128(%rdi), %ymm2
2633 ; AVX2-NEXT: vpalignr {{.*#+}} ymm11 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
2634 ; AVX2-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,2,0]
2635 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5,6],ymm8[7]
2636 ; AVX2-NEXT: vmovdqa 64(%rdi), %ymm11
2637 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm11[2,3],ymm15[4,5],ymm11[6,7]
2638 ; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm6[0],ymm10[1],ymm6[2,3,4],ymm10[5],ymm6[6,7]
2639 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm0[5,6],ymm8[7]
2640 ; AVX2-NEXT: vpermd %ymm0, %ymm12, %ymm0
2641 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2642 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2643 ; AVX2-NEXT: vmovdqa 80(%rdi), %xmm0
2644 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm15[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],ymm15[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23]
2645 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
2646 ; AVX2-NEXT: vpbroadcastd 8(%rdi), %xmm1
2647 ; AVX2-NEXT: vmovdqa 32(%rdi), %xmm12
2648 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm12[1],xmm1[2,3]
2649 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
2650 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
2651 ; AVX2-NEXT: vpbroadcastd 204(%rdi), %ymm14
2652 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm14[7]
2653 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2654 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2655 ; AVX2-NEXT: vmovdqa 304(%rdi), %xmm0
2656 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm4[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
2657 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
2658 ; AVX2-NEXT: vpbroadcastd 232(%rdi), %xmm1
2659 ; AVX2-NEXT: vmovdqa 256(%rdi), %xmm14
2660 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
2661 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
2662 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
2663 ; AVX2-NEXT: vpbroadcastd 428(%rdi), %ymm13
2664 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm13[7]
2665 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2666 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2667 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0],ymm11[1],ymm15[2,3,4,5,6,7]
2668 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm12[0,1,2],mem[3]
2669 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
2670 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
2671 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
2672 ; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm3[1,3],ymm2[4,6],ymm3[5,7]
2673 ; AVX2-NEXT: vbroadcastss 208(%rdi), %ymm11
2674 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm11[7]
2675 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2676 ; AVX2-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
2677 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm5[1],ymm4[2,3,4,5,6,7]
2678 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm14[0,1,2],mem[3]
2679 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
2680 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
2681 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
2682 ; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,2],ymm9[1,3],ymm7[4,6],ymm9[5,7]
2683 ; AVX2-NEXT: vbroadcastss 432(%rdi), %ymm4
2684 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7]
2685 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2686 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2687 ; AVX2-NEXT: vpbroadcastd 100(%rdi), %xmm0
2688 ; AVX2-NEXT: vmovdqa 64(%rdi), %xmm1
2689 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
2690 ; AVX2-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,3,0,0]
2691 ; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm10[4,5,6,7]
2692 ; AVX2-NEXT: vmovdqa %ymm6, %ymm15
2693 ; AVX2-NEXT: vpermd %ymm5, %ymm4, %ymm5
2694 ; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,3]
2695 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,7,0,7,0,7,0,7]
2696 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm11
2697 ; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm3[6,7]
2698 ; AVX2-NEXT: vpbroadcastd 212(%rdi), %ymm12
2699 ; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm12[7]
2700 ; AVX2-NEXT: vpblendd {{.*#+}} ymm12 = ymm5[0,1,2,3],ymm11[4,5,6,7]
2701 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2702 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2703 ; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3],ymm6[4,5,6,7]
2704 ; AVX2-NEXT: vpermd %ymm5, %ymm4, %ymm4
2705 ; AVX2-NEXT: vpbroadcastd 324(%rdi), %xmm5
2706 ; AVX2-NEXT: vmovdqa 288(%rdi), %xmm13
2707 ; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm13[0,1,2],xmm5[3]
2708 ; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3]
2709 ; AVX2-NEXT: vpermd %ymm7, %ymm0, %ymm5
2710 ; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm9[6,7]
2711 ; AVX2-NEXT: vpbroadcastd 436(%rdi), %ymm11
2712 ; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm11[7]
2713 ; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3],ymm5[4,5,6,7]
2714 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
2715 ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,0,3,3,5,4,7,7]
2716 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,3]
2717 ; AVX2-NEXT: vpbroadcastd 216(%rdi), %ymm3
2718 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
2719 ; AVX2-NEXT: vmovdqa 96(%rdi), %xmm3
2720 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
2721 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,2]
2722 ; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm8[1,0,2,3,5,4,6,7]
2723 ; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm4
2724 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
2725 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
2726 ; AVX2-NEXT: vmovdqa 320(%rdi), %xmm8
2727 ; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm8[0,1,2],xmm13[3]
2728 ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
2729 ; AVX2-NEXT: vpshufd $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
2730 ; AVX2-NEXT: # ymm4 = mem[1,0,2,3,5,4,6,7]
2731 ; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm4
2732 ; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
2733 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0],ymm7[1],ymm9[2,3,4],ymm7[5],ymm9[6,7]
2734 ; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[1,0,3,3,5,4,7,7]
2735 ; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3]
2736 ; AVX2-NEXT: vpbroadcastd 440(%rdi), %ymm5
2737 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
2738 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
2739 ; AVX2-NEXT: vpbroadcastd 136(%rdi), %xmm4
2740 ; AVX2-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
2741 ; AVX2-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
2742 ; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
2743 ; AVX2-NEXT: vpermd 192(%rdi), %ymm0, %ymm5
2744 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
2745 ; AVX2-NEXT: vpbroadcastd 80(%rdi), %ymm5
2746 ; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[3]
2747 ; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm10[2,3,2,3,6,7,6,7]
2748 ; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm15[1],ymm5[2,3,4],ymm15[5],ymm5[6,7]
2749 ; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm5
2750 ; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
2751 ; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
2752 ; AVX2-NEXT: vpbroadcastd 360(%rdi), %xmm4
2753 ; AVX2-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
2754 ; AVX2-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
2755 ; AVX2-NEXT: vpermd 416(%rdi), %ymm0, %ymm0
2756 ; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
2757 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
2758 ; AVX2-NEXT: vpbroadcastd 304(%rdi), %ymm4
2759 ; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1,2],xmm8[3]
2760 ; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm6[2,3,2,3,6,7,6,7]
2761 ; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm14[1],ymm5[2,3,4],ymm14[5],ymm5[6,7]
2762 ; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm5
2763 ; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
2764 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
2765 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2766 ; AVX2-NEXT: vmovaps %ymm4, 32(%rsi)
2767 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2768 ; AVX2-NEXT: vmovaps %ymm4, (%rsi)
2769 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2770 ; AVX2-NEXT: vmovaps %ymm4, 32(%rdx)
2771 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2772 ; AVX2-NEXT: vmovaps %ymm4, (%rdx)
2773 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2774 ; AVX2-NEXT: vmovaps %ymm4, 32(%rcx)
2775 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2776 ; AVX2-NEXT: vmovaps %ymm4, (%rcx)
2777 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2778 ; AVX2-NEXT: vmovaps %ymm4, 32(%r8)
2779 ; AVX2-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload
2780 ; AVX2-NEXT: vmovaps %ymm4, (%r8)
2781 ; AVX2-NEXT: vmovdqa %ymm11, 32(%r9)
2782 ; AVX2-NEXT: vmovdqa %ymm12, (%r9)
2783 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
2784 ; AVX2-NEXT: vmovdqa %ymm2, 32(%rax)
2785 ; AVX2-NEXT: vmovdqa %ymm1, (%rax)
2786 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
2787 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rax)
2788 ; AVX2-NEXT: vmovdqa %ymm3, (%rax)
2789 ; AVX2-NEXT: addq $264, %rsp # imm = 0x108
2790 ; AVX2-NEXT: vzeroupper
2791 ; AVX2-NEXT: retq
2792 ;
2793 ; AVX2-FP-LABEL: load_i32_stride7_vf16:
2794 ; AVX2-FP: # %bb.0:
2795 ; AVX2-FP-NEXT: subq $264, %rsp # imm = 0x108
2796 ; AVX2-FP-NEXT: vmovdqa 288(%rdi), %ymm5
2797 ; AVX2-FP-NEXT: vmovdqa 384(%rdi), %ymm9
2798 ; AVX2-FP-NEXT: vmovdqa 352(%rdi), %ymm7
2799 ; AVX2-FP-NEXT: vmovdqa 320(%rdi), %ymm4
2800 ; AVX2-FP-NEXT: vmovdqa 256(%rdi), %ymm0
2801 ; AVX2-FP-NEXT: vmovdqa 224(%rdi), %ymm3
2802 ; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm10
2803 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm6
2804 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm15
2805 ; AVX2-FP-NEXT: vpbroadcastq 80(%rdi), %ymm1
2806 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
2807 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,6,0]
2808 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3,4,5],ymm6[6],ymm10[7]
2809 ; AVX2-FP-NEXT: vpermd %ymm8, %ymm2, %ymm8
2810 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2],ymm1[3,4,5,6,7]
2811 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %xmm8
2812 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %xmm11
2813 ; AVX2-FP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2814 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm8[1],xmm11[1]
2815 ; AVX2-FP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
2816 ; AVX2-FP-NEXT: vpbroadcastd 196(%rdi), %ymm11
2817 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm11[7]
2818 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm8[5,6,7]
2819 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2820 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2821 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2822 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm0[6],ymm3[7]
2823 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm2, %ymm1
2824 ; AVX2-FP-NEXT: vpbroadcastq 304(%rdi), %ymm2
2825 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
2826 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
2827 ; AVX2-FP-NEXT: vmovdqa 352(%rdi), %xmm2
2828 ; AVX2-FP-NEXT: vmovdqa 384(%rdi), %xmm8
2829 ; AVX2-FP-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2830 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm8[1]
2831 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
2832 ; AVX2-FP-NEXT: vpbroadcastd 420(%rdi), %ymm8
2833 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm8[7]
2834 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
2835 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2836 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
2837 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
2838 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm9[12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10,11],ymm9[28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26,27]
2839 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
2840 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
2841 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm5[2,3],ymm4[4,5],ymm5[6,7]
2842 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
2843 ; AVX2-FP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2844 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3,4],ymm2[5,6],ymm8[7]
2845 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [1,0,7,6,5,6,5,6]
2846 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm12, %ymm2
2847 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm1[5,6,7]
2848 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2849 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
2850 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm8
2851 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm3
2852 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm2
2853 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm11 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
2854 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,2,0]
2855 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5,6],ymm8[7]
2856 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm11
2857 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm11[2,3],ymm15[4,5],ymm11[6,7]
2858 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm6[0],ymm10[1],ymm6[2,3,4],ymm10[5],ymm6[6,7]
2859 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm0[5,6],ymm8[7]
2860 ; AVX2-FP-NEXT: vpermd %ymm0, %ymm12, %ymm0
2861 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2862 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2863 ; AVX2-FP-NEXT: vmovdqa 80(%rdi), %xmm0
2864 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm15[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],ymm15[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23]
2865 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
2866 ; AVX2-FP-NEXT: vpbroadcastd 8(%rdi), %xmm1
2867 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm12
2868 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm12[1],xmm1[2,3]
2869 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
2870 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
2871 ; AVX2-FP-NEXT: vpbroadcastd 204(%rdi), %ymm14
2872 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm14[7]
2873 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2874 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2875 ; AVX2-FP-NEXT: vmovdqa 304(%rdi), %xmm0
2876 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm4[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
2877 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
2878 ; AVX2-FP-NEXT: vpbroadcastd 232(%rdi), %xmm1
2879 ; AVX2-FP-NEXT: vmovdqa 256(%rdi), %xmm14
2880 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
2881 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
2882 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
2883 ; AVX2-FP-NEXT: vpbroadcastd 428(%rdi), %ymm13
2884 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm13[7]
2885 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2886 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2887 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0],ymm11[1],ymm15[2,3,4,5,6,7]
2888 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm12[0,1,2],mem[3]
2889 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
2890 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
2891 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
2892 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm3[1,3],ymm2[4,6],ymm3[5,7]
2893 ; AVX2-FP-NEXT: vbroadcastss 208(%rdi), %ymm11
2894 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm11[7]
2895 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2896 ; AVX2-FP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
2897 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm5[1],ymm4[2,3,4,5,6,7]
2898 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm14[0,1,2],mem[3]
2899 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
2900 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
2901 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
2902 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,2],ymm9[1,3],ymm7[4,6],ymm9[5,7]
2903 ; AVX2-FP-NEXT: vbroadcastss 432(%rdi), %ymm4
2904 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7]
2905 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2906 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2907 ; AVX2-FP-NEXT: vpbroadcastd 100(%rdi), %xmm0
2908 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %xmm1
2909 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
2910 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,3,0,0]
2911 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm10[4,5,6,7]
2912 ; AVX2-FP-NEXT: vmovdqa %ymm6, %ymm15
2913 ; AVX2-FP-NEXT: vpermd %ymm5, %ymm4, %ymm5
2914 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,3]
2915 ; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,7,0,7,0,7,0,7]
2916 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm11
2917 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm3[6,7]
2918 ; AVX2-FP-NEXT: vpbroadcastd 212(%rdi), %ymm12
2919 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm12[7]
2920 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm12 = ymm5[0,1,2,3],ymm11[4,5,6,7]
2921 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2922 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2923 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3],ymm6[4,5,6,7]
2924 ; AVX2-FP-NEXT: vpermd %ymm5, %ymm4, %ymm4
2925 ; AVX2-FP-NEXT: vpbroadcastd 324(%rdi), %xmm5
2926 ; AVX2-FP-NEXT: vmovdqa 288(%rdi), %xmm13
2927 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm5 = xmm13[0,1,2],xmm5[3]
2928 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3]
2929 ; AVX2-FP-NEXT: vpermd %ymm7, %ymm0, %ymm5
2930 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm9[6,7]
2931 ; AVX2-FP-NEXT: vpbroadcastd 436(%rdi), %ymm11
2932 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm11[7]
2933 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3],ymm5[4,5,6,7]
2934 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
2935 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,0,3,3,5,4,7,7]
2936 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,3]
2937 ; AVX2-FP-NEXT: vpbroadcastd 216(%rdi), %ymm3
2938 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
2939 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %xmm3
2940 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
2941 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,2]
2942 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm4 = ymm8[1,0,2,3,5,4,6,7]
2943 ; AVX2-FP-NEXT: vextracti128 $1, %ymm4, %xmm4
2944 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
2945 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
2946 ; AVX2-FP-NEXT: vmovdqa 320(%rdi), %xmm8
2947 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm2 = xmm8[0,1,2],xmm13[3]
2948 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
2949 ; AVX2-FP-NEXT: vpshufd $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
2950 ; AVX2-FP-NEXT: # ymm4 = mem[1,0,2,3,5,4,6,7]
2951 ; AVX2-FP-NEXT: vextracti128 $1, %ymm4, %xmm4
2952 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
2953 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0],ymm7[1],ymm9[2,3,4],ymm7[5],ymm9[6,7]
2954 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[1,0,3,3,5,4,7,7]
2955 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3]
2956 ; AVX2-FP-NEXT: vpbroadcastd 440(%rdi), %ymm5
2957 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
2958 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
2959 ; AVX2-FP-NEXT: vpbroadcastd 136(%rdi), %xmm4
2960 ; AVX2-FP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
2961 ; AVX2-FP-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
2962 ; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
2963 ; AVX2-FP-NEXT: vpermd 192(%rdi), %ymm0, %ymm5
2964 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
2965 ; AVX2-FP-NEXT: vpbroadcastd 80(%rdi), %ymm5
2966 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[3]
2967 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm5 = ymm10[2,3,2,3,6,7,6,7]
2968 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm15[1],ymm5[2,3,4],ymm15[5],ymm5[6,7]
2969 ; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm5
2970 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
2971 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
2972 ; AVX2-FP-NEXT: vpbroadcastd 360(%rdi), %xmm4
2973 ; AVX2-FP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
2974 ; AVX2-FP-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
2975 ; AVX2-FP-NEXT: vpermd 416(%rdi), %ymm0, %ymm0
2976 ; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
2977 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
2978 ; AVX2-FP-NEXT: vpbroadcastd 304(%rdi), %ymm4
2979 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1,2],xmm8[3]
2980 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm5 = ymm6[2,3,2,3,6,7,6,7]
2981 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm14[1],ymm5[2,3,4],ymm14[5],ymm5[6,7]
2982 ; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm5
2983 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
2984 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
2985 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2986 ; AVX2-FP-NEXT: vmovaps %ymm4, 32(%rsi)
2987 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2988 ; AVX2-FP-NEXT: vmovaps %ymm4, (%rsi)
2989 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2990 ; AVX2-FP-NEXT: vmovaps %ymm4, 32(%rdx)
2991 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2992 ; AVX2-FP-NEXT: vmovaps %ymm4, (%rdx)
2993 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2994 ; AVX2-FP-NEXT: vmovaps %ymm4, 32(%rcx)
2995 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2996 ; AVX2-FP-NEXT: vmovaps %ymm4, (%rcx)
2997 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2998 ; AVX2-FP-NEXT: vmovaps %ymm4, 32(%r8)
2999 ; AVX2-FP-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload
3000 ; AVX2-FP-NEXT: vmovaps %ymm4, (%r8)
3001 ; AVX2-FP-NEXT: vmovdqa %ymm11, 32(%r9)
3002 ; AVX2-FP-NEXT: vmovdqa %ymm12, (%r9)
3003 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3004 ; AVX2-FP-NEXT: vmovdqa %ymm2, 32(%rax)
3005 ; AVX2-FP-NEXT: vmovdqa %ymm1, (%rax)
3006 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3007 ; AVX2-FP-NEXT: vmovdqa %ymm0, 32(%rax)
3008 ; AVX2-FP-NEXT: vmovdqa %ymm3, (%rax)
3009 ; AVX2-FP-NEXT: addq $264, %rsp # imm = 0x108
3010 ; AVX2-FP-NEXT: vzeroupper
3011 ; AVX2-FP-NEXT: retq
3012 ;
3013 ; AVX2-FCP-LABEL: load_i32_stride7_vf16:
3014 ; AVX2-FCP: # %bb.0:
3015 ; AVX2-FCP-NEXT: subq $264, %rsp # imm = 0x108
3016 ; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %ymm5
3017 ; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %ymm9
3018 ; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm7
3019 ; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm4
3020 ; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %ymm0
3021 ; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm3
3022 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm10
3023 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm6
3024 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm15
3025 ; AVX2-FCP-NEXT: vpbroadcastq 80(%rdi), %ymm1
3026 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
3027 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} xmm2 = [0,7,6,0]
3028 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3,4,5],ymm6[6],ymm10[7]
3029 ; AVX2-FCP-NEXT: vpermd %ymm8, %ymm2, %ymm8
3030 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2],ymm1[3,4,5,6,7]
3031 ; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm8
3032 ; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm11
3033 ; AVX2-FCP-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3034 ; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm8[1],xmm11[1]
3035 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
3036 ; AVX2-FCP-NEXT: vpbroadcastd 196(%rdi), %ymm11
3037 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm11[7]
3038 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm8[5,6,7]
3039 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3040 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3041 ; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3042 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm0[6],ymm3[7]
3043 ; AVX2-FCP-NEXT: vpermd %ymm1, %ymm2, %ymm1
3044 ; AVX2-FCP-NEXT: vpbroadcastq 304(%rdi), %ymm2
3045 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
3046 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
3047 ; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %xmm2
3048 ; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %xmm8
3049 ; AVX2-FCP-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3050 ; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm8[1]
3051 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
3052 ; AVX2-FCP-NEXT: vpbroadcastd 420(%rdi), %ymm8
3053 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm8[7]
3054 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3055 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3056 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
3057 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
3058 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm9[12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10,11],ymm9[28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26,27]
3059 ; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
3060 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
3061 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm5[2,3],ymm4[4,5],ymm5[6,7]
3062 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
3063 ; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3064 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3,4],ymm2[5,6],ymm8[7]
3065 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm12 = [1,0,7,6,5,6,5,6]
3066 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm12, %ymm2
3067 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm1[5,6,7]
3068 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3069 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
3070 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm8
3071 ; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm3
3072 ; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
3073 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm11 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
3074 ; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,2,0]
3075 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5,6],ymm8[7]
3076 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm11
3077 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm11[2,3],ymm15[4,5],ymm11[6,7]
3078 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm6[0],ymm10[1],ymm6[2,3,4],ymm10[5],ymm6[6,7]
3079 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm0[5,6],ymm8[7]
3080 ; AVX2-FCP-NEXT: vpermd %ymm0, %ymm12, %ymm0
3081 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3082 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3083 ; AVX2-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
3084 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm15[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],ymm15[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23]
3085 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
3086 ; AVX2-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm1
3087 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm12
3088 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm12[1],xmm1[2,3]
3089 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
3090 ; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
3091 ; AVX2-FCP-NEXT: vpbroadcastd 204(%rdi), %ymm14
3092 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm14[7]
3093 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3094 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3095 ; AVX2-FCP-NEXT: vmovdqa 304(%rdi), %xmm0
3096 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm4[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
3097 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
3098 ; AVX2-FCP-NEXT: vpbroadcastd 232(%rdi), %xmm1
3099 ; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %xmm14
3100 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
3101 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
3102 ; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
3103 ; AVX2-FCP-NEXT: vpbroadcastd 428(%rdi), %ymm13
3104 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm13[7]
3105 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3106 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3107 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0],ymm11[1],ymm15[2,3,4,5,6,7]
3108 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm12[0,1,2],mem[3]
3109 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
3110 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
3111 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
3112 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm3[1,3],ymm2[4,6],ymm3[5,7]
3113 ; AVX2-FCP-NEXT: vbroadcastss 208(%rdi), %ymm11
3114 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm11[7]
3115 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3116 ; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
3117 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm5[1],ymm4[2,3,4,5,6,7]
3118 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm14[0,1,2],mem[3]
3119 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
3120 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
3121 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
3122 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,2],ymm9[1,3],ymm7[4,6],ymm9[5,7]
3123 ; AVX2-FCP-NEXT: vbroadcastss 432(%rdi), %ymm4
3124 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7]
3125 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3126 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3127 ; AVX2-FCP-NEXT: vpbroadcastd 100(%rdi), %xmm0
3128 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
3129 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
3130 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,3,0,0]
3131 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm10[4,5,6,7]
3132 ; AVX2-FCP-NEXT: vmovdqa %ymm6, %ymm15
3133 ; AVX2-FCP-NEXT: vpermd %ymm5, %ymm4, %ymm5
3134 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,3]
3135 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,7,0,7,0,7,0,7]
3136 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm11
3137 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm3[6,7]
3138 ; AVX2-FCP-NEXT: vpbroadcastd 212(%rdi), %ymm12
3139 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm12[7]
3140 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm5[0,1,2,3],ymm11[4,5,6,7]
3141 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
3142 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3143 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3],ymm6[4,5,6,7]
3144 ; AVX2-FCP-NEXT: vpermd %ymm5, %ymm4, %ymm4
3145 ; AVX2-FCP-NEXT: vpbroadcastd 324(%rdi), %xmm5
3146 ; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %xmm13
3147 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm13[0,1,2],xmm5[3]
3148 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3]
3149 ; AVX2-FCP-NEXT: vpermd %ymm7, %ymm0, %ymm5
3150 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm9[6,7]
3151 ; AVX2-FCP-NEXT: vpbroadcastd 436(%rdi), %ymm11
3152 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm11[7]
3153 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3],ymm5[4,5,6,7]
3154 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
3155 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,0,3,3,1,0,7,7]
3156 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
3157 ; AVX2-FCP-NEXT: vpbroadcastd 216(%rdi), %ymm4
3158 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
3159 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %xmm4
3160 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
3161 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,2]
3162 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm8[1,0,2,3,5,4,6,7]
3163 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
3164 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
3165 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
3166 ; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %xmm8
3167 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm8[0,1,2],xmm13[3]
3168 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
3169 ; AVX2-FCP-NEXT: vpshufd $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
3170 ; AVX2-FCP-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
3171 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
3172 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
3173 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm9[0],ymm7[1],ymm9[2,3,4],ymm7[5],ymm9[6,7]
3174 ; AVX2-FCP-NEXT: vpermd %ymm5, %ymm3, %ymm3
3175 ; AVX2-FCP-NEXT: vpbroadcastd 440(%rdi), %ymm5
3176 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm5[7]
3177 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
3178 ; AVX2-FCP-NEXT: vpbroadcastd 136(%rdi), %xmm3
3179 ; AVX2-FCP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
3180 ; AVX2-FCP-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
3181 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
3182 ; AVX2-FCP-NEXT: vpermd 192(%rdi), %ymm0, %ymm5
3183 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
3184 ; AVX2-FCP-NEXT: vpbroadcastd 80(%rdi), %ymm5
3185 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3]
3186 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm10[2,3,2,3,6,7,6,7]
3187 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm15[1],ymm5[2,3,4],ymm15[5],ymm5[6,7]
3188 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
3189 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
3190 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
3191 ; AVX2-FCP-NEXT: vpbroadcastd 360(%rdi), %xmm4
3192 ; AVX2-FCP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
3193 ; AVX2-FCP-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
3194 ; AVX2-FCP-NEXT: vpermd 416(%rdi), %ymm0, %ymm0
3195 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3196 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
3197 ; AVX2-FCP-NEXT: vpbroadcastd 304(%rdi), %ymm4
3198 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1,2],xmm8[3]
3199 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm5 = ymm6[2,3,2,3,6,7,6,7]
3200 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm14[1],ymm5[2,3,4],ymm14[5],ymm5[6,7]
3201 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
3202 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
3203 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
3204 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3205 ; AVX2-FCP-NEXT: vmovaps %ymm4, 32(%rsi)
3206 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3207 ; AVX2-FCP-NEXT: vmovaps %ymm4, (%rsi)
3208 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3209 ; AVX2-FCP-NEXT: vmovaps %ymm4, 32(%rdx)
3210 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3211 ; AVX2-FCP-NEXT: vmovaps %ymm4, (%rdx)
3212 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3213 ; AVX2-FCP-NEXT: vmovaps %ymm4, 32(%rcx)
3214 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3215 ; AVX2-FCP-NEXT: vmovaps %ymm4, (%rcx)
3216 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3217 ; AVX2-FCP-NEXT: vmovaps %ymm4, 32(%r8)
3218 ; AVX2-FCP-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload
3219 ; AVX2-FCP-NEXT: vmovaps %ymm4, (%r8)
3220 ; AVX2-FCP-NEXT: vmovdqa %ymm11, 32(%r9)
3221 ; AVX2-FCP-NEXT: vmovdqa %ymm12, (%r9)
3222 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3223 ; AVX2-FCP-NEXT: vmovdqa %ymm2, 32(%rax)
3224 ; AVX2-FCP-NEXT: vmovdqa %ymm1, (%rax)
3225 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3226 ; AVX2-FCP-NEXT: vmovdqa %ymm0, 32(%rax)
3227 ; AVX2-FCP-NEXT: vmovdqa %ymm3, (%rax)
3228 ; AVX2-FCP-NEXT: addq $264, %rsp # imm = 0x108
3229 ; AVX2-FCP-NEXT: vzeroupper
3230 ; AVX2-FCP-NEXT: retq
3231 ;
3232 ; AVX512-LABEL: load_i32_stride7_vf16:
3233 ; AVX512: # %bb.0:
3234 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
3235 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
3236 ; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm1
3237 ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm5
3238 ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm4
3239 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
3240 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm2
3241 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm8
3242 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm7
3243 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
3244 ; AVX512-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
3245 ; AVX512-NEXT: vpermi2d %zmm8, %zmm7, %zmm6
3246 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,7,14,21,28,0,0,0]
3247 ; AVX512-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
3248 ; AVX512-NEXT: movw $992, %di # imm = 0x3E0
3249 ; AVX512-NEXT: kmovw %edi, %k1
3250 ; AVX512-NEXT: vmovdqa32 %zmm6, %zmm3 {%k1}
3251 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
3252 ; AVX512-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3253 ; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3254 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
3255 ; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3256 ; AVX512-NEXT: vpermi2d %zmm1, %zmm6, %zmm9
3257 ; AVX512-NEXT: movb $-32, %dil
3258 ; AVX512-NEXT: kmovw %edi, %k1
3259 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
3260 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
3261 ; AVX512-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3262 ; AVX512-NEXT: vmovdqa64 %zmm8, %zmm9
3263 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
3264 ; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3265 ; AVX512-NEXT: vpermi2d %zmm7, %zmm8, %zmm10
3266 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
3267 ; AVX512-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3268 ; AVX512-NEXT: vpermi2d %zmm7, %zmm8, %zmm11
3269 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
3270 ; AVX512-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
3271 ; AVX512-NEXT: vpermi2d %zmm7, %zmm8, %zmm12
3272 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
3273 ; AVX512-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
3274 ; AVX512-NEXT: vpermi2d %zmm8, %zmm7, %zmm13
3275 ; AVX512-NEXT: vpermt2d %zmm7, %zmm6, %zmm8
3276 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,8,15,22,29,0,0,0]
3277 ; AVX512-NEXT: vpermi2d %zmm2, %zmm0, %zmm14
3278 ; AVX512-NEXT: movw $480, %di # imm = 0x1E0
3279 ; AVX512-NEXT: kmovw %edi, %k2
3280 ; AVX512-NEXT: vmovdqa32 %zmm8, %zmm14 {%k2}
3281 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
3282 ; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
3283 ; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm8
3284 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
3285 ; AVX512-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3286 ; AVX512-NEXT: vpermi2d %zmm1, %zmm8, %zmm15
3287 ; AVX512-NEXT: movw $-512, %di # imm = 0xFE00
3288 ; AVX512-NEXT: kmovw %edi, %k1
3289 ; AVX512-NEXT: vmovdqa32 %zmm15, %zmm14 {%k1}
3290 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
3291 ; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3292 ; AVX512-NEXT: vpermt2d %zmm7, %zmm8, %zmm9
3293 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
3294 ; AVX512-NEXT: vpermi2d %zmm0, %zmm2, %zmm7
3295 ; AVX512-NEXT: vmovdqa32 %zmm9, %zmm7 {%k2}
3296 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
3297 ; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3298 ; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm9
3299 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
3300 ; AVX512-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3301 ; AVX512-NEXT: vpermi2d %zmm1, %zmm9, %zmm15
3302 ; AVX512-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
3303 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm9 = [19,26,1,8,15,0,0,0]
3304 ; AVX512-NEXT: vpermi2d %zmm0, %zmm2, %zmm9
3305 ; AVX512-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
3306 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
3307 ; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3308 ; AVX512-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3309 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
3310 ; AVX512-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3311 ; AVX512-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3312 ; AVX512-NEXT: vmovdqa32 %zmm15, %zmm9 {%k1}
3313 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
3314 ; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3315 ; AVX512-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3316 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
3317 ; AVX512-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3318 ; AVX512-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3319 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm10 = [4,11,18,25]
3320 ; AVX512-NEXT: vpermi2d %zmm2, %zmm0, %zmm10
3321 ; AVX512-NEXT: vinserti32x4 $0, %xmm10, %zmm11, %zmm10
3322 ; AVX512-NEXT: vmovdqa32 %zmm15, %zmm10 {%k1}
3323 ; AVX512-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3324 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
3325 ; AVX512-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3326 ; AVX512-NEXT: vpermi2d %zmm1, %zmm6, %zmm11
3327 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,12,19,26]
3328 ; AVX512-NEXT: vpermi2d %zmm2, %zmm0, %zmm6
3329 ; AVX512-NEXT: vinserti32x4 $0, %xmm6, %zmm12, %zmm6
3330 ; AVX512-NEXT: vmovdqa32 %zmm11, %zmm6 {%k1}
3331 ; AVX512-NEXT: vpermt2d %zmm5, %zmm8, %zmm4
3332 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
3333 ; AVX512-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
3334 ; AVX512-NEXT: vpermi2d %zmm1, %zmm4, %zmm5
3335 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm1 = [6,13,20,27]
3336 ; AVX512-NEXT: vpermi2d %zmm2, %zmm0, %zmm1
3337 ; AVX512-NEXT: vinserti32x4 $0, %xmm1, %zmm13, %zmm0
3338 ; AVX512-NEXT: vmovdqa32 %zmm5, %zmm0 {%k1}
3339 ; AVX512-NEXT: vmovdqa64 %zmm3, (%rsi)
3340 ; AVX512-NEXT: vmovdqa64 %zmm14, (%rdx)
3341 ; AVX512-NEXT: vmovdqa64 %zmm7, (%rcx)
3342 ; AVX512-NEXT: vmovdqa64 %zmm9, (%r8)
3343 ; AVX512-NEXT: vmovdqa64 %zmm10, (%r9)
3344 ; AVX512-NEXT: vmovdqa64 %zmm6, (%r10)
3345 ; AVX512-NEXT: vmovdqa64 %zmm0, (%rax)
3346 ; AVX512-NEXT: vzeroupper
3347 ; AVX512-NEXT: retq
3348 ;
3349 ; AVX512-FCP-LABEL: load_i32_stride7_vf16:
3350 ; AVX512-FCP: # %bb.0:
3351 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3352 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
3353 ; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm1
3354 ; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm5
3355 ; AVX512-FCP-NEXT: vmovdqa64 256(%rdi), %zmm4
3356 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
3357 ; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm2
3358 ; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm8
3359 ; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %zmm7
3360 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
3361 ; AVX512-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
3362 ; AVX512-FCP-NEXT: vpermi2d %zmm8, %zmm7, %zmm6
3363 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,7,14,21,28,0,0,0]
3364 ; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
3365 ; AVX512-FCP-NEXT: movw $992, %di # imm = 0x3E0
3366 ; AVX512-FCP-NEXT: kmovw %edi, %k1
3367 ; AVX512-FCP-NEXT: vmovdqa32 %zmm6, %zmm3 {%k1}
3368 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
3369 ; AVX512-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3370 ; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3371 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
3372 ; AVX512-FCP-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3373 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm6, %zmm9
3374 ; AVX512-FCP-NEXT: movb $-32, %dil
3375 ; AVX512-FCP-NEXT: kmovw %edi, %k1
3376 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
3377 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
3378 ; AVX512-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3379 ; AVX512-FCP-NEXT: vmovdqa64 %zmm8, %zmm9
3380 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
3381 ; AVX512-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3382 ; AVX512-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm10
3383 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
3384 ; AVX512-FCP-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3385 ; AVX512-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm11
3386 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
3387 ; AVX512-FCP-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
3388 ; AVX512-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm12
3389 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
3390 ; AVX512-FCP-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
3391 ; AVX512-FCP-NEXT: vpermi2d %zmm8, %zmm7, %zmm13
3392 ; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm6, %zmm8
3393 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,8,15,22,29,0,0,0]
3394 ; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm14
3395 ; AVX512-FCP-NEXT: movw $480, %di # imm = 0x1E0
3396 ; AVX512-FCP-NEXT: kmovw %edi, %k2
3397 ; AVX512-FCP-NEXT: vmovdqa32 %zmm8, %zmm14 {%k2}
3398 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
3399 ; AVX512-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
3400 ; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm8
3401 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
3402 ; AVX512-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3403 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm8, %zmm15
3404 ; AVX512-FCP-NEXT: movw $-512, %di # imm = 0xFE00
3405 ; AVX512-FCP-NEXT: kmovw %edi, %k1
3406 ; AVX512-FCP-NEXT: vmovdqa32 %zmm15, %zmm14 {%k1}
3407 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
3408 ; AVX512-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3409 ; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm8, %zmm9
3410 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
3411 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm2, %zmm7
3412 ; AVX512-FCP-NEXT: vmovdqa32 %zmm9, %zmm7 {%k2}
3413 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
3414 ; AVX512-FCP-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3415 ; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm9
3416 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
3417 ; AVX512-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3418 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm9, %zmm15
3419 ; AVX512-FCP-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
3420 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [19,26,1,8,15,0,0,0]
3421 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm2, %zmm9
3422 ; AVX512-FCP-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
3423 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
3424 ; AVX512-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3425 ; AVX512-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3426 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
3427 ; AVX512-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3428 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3429 ; AVX512-FCP-NEXT: vmovdqa32 %zmm15, %zmm9 {%k1}
3430 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
3431 ; AVX512-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3432 ; AVX512-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3433 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
3434 ; AVX512-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3435 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3436 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm10 = [4,11,18,25]
3437 ; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm10
3438 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm10, %zmm11, %zmm10
3439 ; AVX512-FCP-NEXT: vmovdqa32 %zmm15, %zmm10 {%k1}
3440 ; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3441 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
3442 ; AVX512-FCP-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3443 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm6, %zmm11
3444 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,12,19,26]
3445 ; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm6
3446 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm6, %zmm12, %zmm6
3447 ; AVX512-FCP-NEXT: vmovdqa32 %zmm11, %zmm6 {%k1}
3448 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm8, %zmm4
3449 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
3450 ; AVX512-FCP-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
3451 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm4, %zmm5
3452 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [6,13,20,27]
3453 ; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm1
3454 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm1, %zmm13, %zmm0
3455 ; AVX512-FCP-NEXT: vmovdqa32 %zmm5, %zmm0 {%k1}
3456 ; AVX512-FCP-NEXT: vmovdqa64 %zmm3, (%rsi)
3457 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
3458 ; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%rcx)
3459 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, (%r8)
3460 ; AVX512-FCP-NEXT: vmovdqa64 %zmm10, (%r9)
3461 ; AVX512-FCP-NEXT: vmovdqa64 %zmm6, (%r10)
3462 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
3463 ; AVX512-FCP-NEXT: vzeroupper
3464 ; AVX512-FCP-NEXT: retq
3465 ;
3466 ; AVX512DQ-LABEL: load_i32_stride7_vf16:
3467 ; AVX512DQ: # %bb.0:
3468 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
3469 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
3470 ; AVX512DQ-NEXT: vmovdqa64 384(%rdi), %zmm1
3471 ; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %zmm5
3472 ; AVX512DQ-NEXT: vmovdqa64 256(%rdi), %zmm4
3473 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
3474 ; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm2
3475 ; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm8
3476 ; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %zmm7
3477 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
3478 ; AVX512DQ-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
3479 ; AVX512DQ-NEXT: vpermi2d %zmm8, %zmm7, %zmm6
3480 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,7,14,21,28,0,0,0]
3481 ; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
3482 ; AVX512DQ-NEXT: movw $992, %di # imm = 0x3E0
3483 ; AVX512DQ-NEXT: kmovw %edi, %k1
3484 ; AVX512DQ-NEXT: vmovdqa32 %zmm6, %zmm3 {%k1}
3485 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
3486 ; AVX512DQ-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3487 ; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3488 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
3489 ; AVX512DQ-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3490 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm6, %zmm9
3491 ; AVX512DQ-NEXT: movb $-32, %dil
3492 ; AVX512DQ-NEXT: kmovw %edi, %k1
3493 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
3494 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
3495 ; AVX512DQ-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3496 ; AVX512DQ-NEXT: vmovdqa64 %zmm8, %zmm9
3497 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
3498 ; AVX512DQ-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3499 ; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm8, %zmm10
3500 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
3501 ; AVX512DQ-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3502 ; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm8, %zmm11
3503 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
3504 ; AVX512DQ-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
3505 ; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm8, %zmm12
3506 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
3507 ; AVX512DQ-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
3508 ; AVX512DQ-NEXT: vpermi2d %zmm8, %zmm7, %zmm13
3509 ; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm6, %zmm8
3510 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,8,15,22,29,0,0,0]
3511 ; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm0, %zmm14
3512 ; AVX512DQ-NEXT: movw $480, %di # imm = 0x1E0
3513 ; AVX512DQ-NEXT: kmovw %edi, %k2
3514 ; AVX512DQ-NEXT: vmovdqa32 %zmm8, %zmm14 {%k2}
3515 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
3516 ; AVX512DQ-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
3517 ; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm8
3518 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
3519 ; AVX512DQ-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3520 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm8, %zmm15
3521 ; AVX512DQ-NEXT: movw $-512, %di # imm = 0xFE00
3522 ; AVX512DQ-NEXT: kmovw %edi, %k1
3523 ; AVX512DQ-NEXT: vmovdqa32 %zmm15, %zmm14 {%k1}
3524 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
3525 ; AVX512DQ-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3526 ; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm8, %zmm9
3527 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
3528 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm2, %zmm7
3529 ; AVX512DQ-NEXT: vmovdqa32 %zmm9, %zmm7 {%k2}
3530 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
3531 ; AVX512DQ-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3532 ; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm9
3533 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
3534 ; AVX512DQ-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3535 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm9, %zmm15
3536 ; AVX512DQ-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
3537 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm9 = [19,26,1,8,15,0,0,0]
3538 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm2, %zmm9
3539 ; AVX512DQ-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
3540 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
3541 ; AVX512DQ-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3542 ; AVX512DQ-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3543 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
3544 ; AVX512DQ-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3545 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3546 ; AVX512DQ-NEXT: vmovdqa32 %zmm15, %zmm9 {%k1}
3547 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
3548 ; AVX512DQ-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3549 ; AVX512DQ-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3550 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
3551 ; AVX512DQ-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3552 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3553 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm10 = [4,11,18,25]
3554 ; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm0, %zmm10
3555 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm10, %zmm11, %zmm10
3556 ; AVX512DQ-NEXT: vmovdqa32 %zmm15, %zmm10 {%k1}
3557 ; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3558 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
3559 ; AVX512DQ-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3560 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm6, %zmm11
3561 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,12,19,26]
3562 ; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm0, %zmm6
3563 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm6, %zmm12, %zmm6
3564 ; AVX512DQ-NEXT: vmovdqa32 %zmm11, %zmm6 {%k1}
3565 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm8, %zmm4
3566 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
3567 ; AVX512DQ-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
3568 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm4, %zmm5
3569 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm1 = [6,13,20,27]
3570 ; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm0, %zmm1
3571 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm1, %zmm13, %zmm0
3572 ; AVX512DQ-NEXT: vmovdqa32 %zmm5, %zmm0 {%k1}
3573 ; AVX512DQ-NEXT: vmovdqa64 %zmm3, (%rsi)
3574 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, (%rdx)
3575 ; AVX512DQ-NEXT: vmovdqa64 %zmm7, (%rcx)
3576 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, (%r8)
3577 ; AVX512DQ-NEXT: vmovdqa64 %zmm10, (%r9)
3578 ; AVX512DQ-NEXT: vmovdqa64 %zmm6, (%r10)
3579 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%rax)
3580 ; AVX512DQ-NEXT: vzeroupper
3581 ; AVX512DQ-NEXT: retq
3582 ;
3583 ; AVX512DQ-FCP-LABEL: load_i32_stride7_vf16:
3584 ; AVX512DQ-FCP: # %bb.0:
3585 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3586 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
3587 ; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm1
3588 ; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm5
3589 ; AVX512DQ-FCP-NEXT: vmovdqa64 256(%rdi), %zmm4
3590 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
3591 ; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm2
3592 ; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm8
3593 ; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %zmm7
3594 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
3595 ; AVX512DQ-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
3596 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm8, %zmm7, %zmm6
3597 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,7,14,21,28,0,0,0]
3598 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
3599 ; AVX512DQ-FCP-NEXT: movw $992, %di # imm = 0x3E0
3600 ; AVX512DQ-FCP-NEXT: kmovw %edi, %k1
3601 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm6, %zmm3 {%k1}
3602 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
3603 ; AVX512DQ-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3604 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3605 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
3606 ; AVX512DQ-FCP-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3607 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm6, %zmm9
3608 ; AVX512DQ-FCP-NEXT: movb $-32, %dil
3609 ; AVX512DQ-FCP-NEXT: kmovw %edi, %k1
3610 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
3611 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
3612 ; AVX512DQ-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3613 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, %zmm9
3614 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
3615 ; AVX512DQ-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3616 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm10
3617 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
3618 ; AVX512DQ-FCP-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3619 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm11
3620 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
3621 ; AVX512DQ-FCP-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
3622 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm12
3623 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
3624 ; AVX512DQ-FCP-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
3625 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm8, %zmm7, %zmm13
3626 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm6, %zmm8
3627 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,8,15,22,29,0,0,0]
3628 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm14
3629 ; AVX512DQ-FCP-NEXT: movw $480, %di # imm = 0x1E0
3630 ; AVX512DQ-FCP-NEXT: kmovw %edi, %k2
3631 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm8, %zmm14 {%k2}
3632 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
3633 ; AVX512DQ-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
3634 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm8
3635 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
3636 ; AVX512DQ-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3637 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm8, %zmm15
3638 ; AVX512DQ-FCP-NEXT: movw $-512, %di # imm = 0xFE00
3639 ; AVX512DQ-FCP-NEXT: kmovw %edi, %k1
3640 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm15, %zmm14 {%k1}
3641 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
3642 ; AVX512DQ-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3643 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm8, %zmm9
3644 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
3645 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm2, %zmm7
3646 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm9, %zmm7 {%k2}
3647 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
3648 ; AVX512DQ-FCP-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3649 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm9
3650 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
3651 ; AVX512DQ-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3652 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm9, %zmm15
3653 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
3654 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [19,26,1,8,15,0,0,0]
3655 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm2, %zmm9
3656 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
3657 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
3658 ; AVX512DQ-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3659 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3660 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
3661 ; AVX512DQ-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3662 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3663 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm15, %zmm9 {%k1}
3664 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
3665 ; AVX512DQ-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3666 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3667 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
3668 ; AVX512DQ-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3669 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3670 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm10 = [4,11,18,25]
3671 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm10
3672 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm10, %zmm11, %zmm10
3673 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm15, %zmm10 {%k1}
3674 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3675 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
3676 ; AVX512DQ-FCP-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3677 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm6, %zmm11
3678 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,12,19,26]
3679 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm6
3680 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm6, %zmm12, %zmm6
3681 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm11, %zmm6 {%k1}
3682 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm8, %zmm4
3683 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
3684 ; AVX512DQ-FCP-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
3685 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm4, %zmm5
3686 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [6,13,20,27]
3687 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm1
3688 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm1, %zmm13, %zmm0
3689 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm5, %zmm0 {%k1}
3690 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, (%rsi)
3691 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
3692 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%rcx)
3693 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, (%r8)
3694 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, (%r9)
3695 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, (%r10)
3696 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
3697 ; AVX512DQ-FCP-NEXT: vzeroupper
3698 ; AVX512DQ-FCP-NEXT: retq
3699 ;
3700 ; AVX512BW-LABEL: load_i32_stride7_vf16:
3701 ; AVX512BW: # %bb.0:
3702 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
3703 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
3704 ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm1
3705 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm5
3706 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm4
3707 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
3708 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm2
3709 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm8
3710 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm7
3711 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
3712 ; AVX512BW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
3713 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm7, %zmm6
3714 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,7,14,21,28,0,0,0]
3715 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
3716 ; AVX512BW-NEXT: movw $992, %di # imm = 0x3E0
3717 ; AVX512BW-NEXT: kmovd %edi, %k1
3718 ; AVX512BW-NEXT: vmovdqa32 %zmm6, %zmm3 {%k1}
3719 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
3720 ; AVX512BW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3721 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3722 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
3723 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3724 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm6, %zmm9
3725 ; AVX512BW-NEXT: movb $-32, %dil
3726 ; AVX512BW-NEXT: kmovd %edi, %k1
3727 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
3728 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
3729 ; AVX512BW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3730 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm9
3731 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
3732 ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3733 ; AVX512BW-NEXT: vpermi2d %zmm7, %zmm8, %zmm10
3734 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
3735 ; AVX512BW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3736 ; AVX512BW-NEXT: vpermi2d %zmm7, %zmm8, %zmm11
3737 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
3738 ; AVX512BW-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
3739 ; AVX512BW-NEXT: vpermi2d %zmm7, %zmm8, %zmm12
3740 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
3741 ; AVX512BW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
3742 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm7, %zmm13
3743 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm6, %zmm8
3744 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,8,15,22,29,0,0,0]
3745 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm14
3746 ; AVX512BW-NEXT: movw $480, %di # imm = 0x1E0
3747 ; AVX512BW-NEXT: kmovd %edi, %k2
3748 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm14 {%k2}
3749 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
3750 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
3751 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm8
3752 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
3753 ; AVX512BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3754 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm8, %zmm15
3755 ; AVX512BW-NEXT: movw $-512, %di # imm = 0xFE00
3756 ; AVX512BW-NEXT: kmovd %edi, %k1
3757 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm14 {%k1}
3758 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
3759 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3760 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm8, %zmm9
3761 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
3762 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm7
3763 ; AVX512BW-NEXT: vmovdqa32 %zmm9, %zmm7 {%k2}
3764 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
3765 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3766 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm9
3767 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
3768 ; AVX512BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3769 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm9, %zmm15
3770 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
3771 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm9 = [19,26,1,8,15,0,0,0]
3772 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm9
3773 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
3774 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
3775 ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3776 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3777 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
3778 ; AVX512BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3779 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3780 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm9 {%k1}
3781 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
3782 ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3783 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3784 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
3785 ; AVX512BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3786 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3787 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm10 = [4,11,18,25]
3788 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm10
3789 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm10, %zmm11, %zmm10
3790 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm10 {%k1}
3791 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3792 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
3793 ; AVX512BW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3794 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm6, %zmm11
3795 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,12,19,26]
3796 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm6
3797 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm6, %zmm12, %zmm6
3798 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm6 {%k1}
3799 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm8, %zmm4
3800 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
3801 ; AVX512BW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
3802 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm4, %zmm5
3803 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm1 = [6,13,20,27]
3804 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm1
3805 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm1, %zmm13, %zmm0
3806 ; AVX512BW-NEXT: vmovdqa32 %zmm5, %zmm0 {%k1}
3807 ; AVX512BW-NEXT: vmovdqa64 %zmm3, (%rsi)
3808 ; AVX512BW-NEXT: vmovdqa64 %zmm14, (%rdx)
3809 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rcx)
3810 ; AVX512BW-NEXT: vmovdqa64 %zmm9, (%r8)
3811 ; AVX512BW-NEXT: vmovdqa64 %zmm10, (%r9)
3812 ; AVX512BW-NEXT: vmovdqa64 %zmm6, (%r10)
3813 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rax)
3814 ; AVX512BW-NEXT: vzeroupper
3815 ; AVX512BW-NEXT: retq
3816 ;
3817 ; AVX512BW-FCP-LABEL: load_i32_stride7_vf16:
3818 ; AVX512BW-FCP: # %bb.0:
3819 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3820 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
3821 ; AVX512BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm1
3822 ; AVX512BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm5
3823 ; AVX512BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm4
3824 ; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
3825 ; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm2
3826 ; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm8
3827 ; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm7
3828 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
3829 ; AVX512BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
3830 ; AVX512BW-FCP-NEXT: vpermi2d %zmm8, %zmm7, %zmm6
3831 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,7,14,21,28,0,0,0]
3832 ; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
3833 ; AVX512BW-FCP-NEXT: movw $992, %di # imm = 0x3E0
3834 ; AVX512BW-FCP-NEXT: kmovd %edi, %k1
3835 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm6, %zmm3 {%k1}
3836 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
3837 ; AVX512BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3838 ; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3839 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
3840 ; AVX512BW-FCP-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3841 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm6, %zmm9
3842 ; AVX512BW-FCP-NEXT: movb $-32, %dil
3843 ; AVX512BW-FCP-NEXT: kmovd %edi, %k1
3844 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
3845 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
3846 ; AVX512BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3847 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm9
3848 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
3849 ; AVX512BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3850 ; AVX512BW-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm10
3851 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
3852 ; AVX512BW-FCP-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3853 ; AVX512BW-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm11
3854 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
3855 ; AVX512BW-FCP-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
3856 ; AVX512BW-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm12
3857 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
3858 ; AVX512BW-FCP-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
3859 ; AVX512BW-FCP-NEXT: vpermi2d %zmm8, %zmm7, %zmm13
3860 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm6, %zmm8
3861 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,8,15,22,29,0,0,0]
3862 ; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm14
3863 ; AVX512BW-FCP-NEXT: movw $480, %di # imm = 0x1E0
3864 ; AVX512BW-FCP-NEXT: kmovd %edi, %k2
3865 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm8, %zmm14 {%k2}
3866 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
3867 ; AVX512BW-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
3868 ; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm8
3869 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
3870 ; AVX512BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3871 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm8, %zmm15
3872 ; AVX512BW-FCP-NEXT: movw $-512, %di # imm = 0xFE00
3873 ; AVX512BW-FCP-NEXT: kmovd %edi, %k1
3874 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm15, %zmm14 {%k1}
3875 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
3876 ; AVX512BW-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3877 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm8, %zmm9
3878 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
3879 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm2, %zmm7
3880 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm9, %zmm7 {%k2}
3881 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
3882 ; AVX512BW-FCP-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3883 ; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm9
3884 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
3885 ; AVX512BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3886 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm9, %zmm15
3887 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
3888 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [19,26,1,8,15,0,0,0]
3889 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm2, %zmm9
3890 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
3891 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
3892 ; AVX512BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3893 ; AVX512BW-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3894 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
3895 ; AVX512BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3896 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3897 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm15, %zmm9 {%k1}
3898 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
3899 ; AVX512BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
3900 ; AVX512BW-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
3901 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
3902 ; AVX512BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3903 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
3904 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm10 = [4,11,18,25]
3905 ; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm10
3906 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm10, %zmm11, %zmm10
3907 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm15, %zmm10 {%k1}
3908 ; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3909 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
3910 ; AVX512BW-FCP-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3911 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm6, %zmm11
3912 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,12,19,26]
3913 ; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm6
3914 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm6, %zmm12, %zmm6
3915 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm11, %zmm6 {%k1}
3916 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm8, %zmm4
3917 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
3918 ; AVX512BW-FCP-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
3919 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm4, %zmm5
3920 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [6,13,20,27]
3921 ; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm1
3922 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm1, %zmm13, %zmm0
3923 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm5, %zmm0 {%k1}
3924 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, (%rsi)
3925 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
3926 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, (%rcx)
3927 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, (%r8)
3928 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, (%r9)
3929 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, (%r10)
3930 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
3931 ; AVX512BW-FCP-NEXT: vzeroupper
3932 ; AVX512BW-FCP-NEXT: retq
3934 ; AVX512DQ-BW-LABEL: load_i32_stride7_vf16:
3935 ; AVX512DQ-BW: # %bb.0:
3936 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
3937 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
3938 ; AVX512DQ-BW-NEXT: vmovdqa64 384(%rdi), %zmm1
3939 ; AVX512DQ-BW-NEXT: vmovdqa64 320(%rdi), %zmm5
3940 ; AVX512DQ-BW-NEXT: vmovdqa64 256(%rdi), %zmm4
3941 ; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
3942 ; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm2
3943 ; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm8
3944 ; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %zmm7
3945 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
3946 ; AVX512DQ-BW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
3947 ; AVX512DQ-BW-NEXT: vpermi2d %zmm8, %zmm7, %zmm6
3948 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,7,14,21,28,0,0,0]
3949 ; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
3950 ; AVX512DQ-BW-NEXT: movw $992, %di # imm = 0x3E0
3951 ; AVX512DQ-BW-NEXT: kmovd %edi, %k1
3952 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm6, %zmm3 {%k1}
3953 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
3954 ; AVX512DQ-BW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3955 ; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
3956 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
3957 ; AVX512DQ-BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
3958 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm6, %zmm9
3959 ; AVX512DQ-BW-NEXT: movb $-32, %dil
3960 ; AVX512DQ-BW-NEXT: kmovd %edi, %k1
3961 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
3962 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
3963 ; AVX512DQ-BW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3964 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, %zmm9
3965 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
3966 ; AVX512DQ-BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3967 ; AVX512DQ-BW-NEXT: vpermi2d %zmm7, %zmm8, %zmm10
3968 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
3969 ; AVX512DQ-BW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
3970 ; AVX512DQ-BW-NEXT: vpermi2d %zmm7, %zmm8, %zmm11
3971 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
3972 ; AVX512DQ-BW-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
3973 ; AVX512DQ-BW-NEXT: vpermi2d %zmm7, %zmm8, %zmm12
3974 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
3975 ; AVX512DQ-BW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
3976 ; AVX512DQ-BW-NEXT: vpermi2d %zmm8, %zmm7, %zmm13
3977 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm6, %zmm8
3978 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,8,15,22,29,0,0,0]
3979 ; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm14
3980 ; AVX512DQ-BW-NEXT: movw $480, %di # imm = 0x1E0
3981 ; AVX512DQ-BW-NEXT: kmovd %edi, %k2
3982 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm8, %zmm14 {%k2}
3983 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
3984 ; AVX512DQ-BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
3985 ; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm8
3986 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
3987 ; AVX512DQ-BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
3988 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm8, %zmm15
3989 ; AVX512DQ-BW-NEXT: movw $-512, %di # imm = 0xFE00
3990 ; AVX512DQ-BW-NEXT: kmovd %edi, %k1
3991 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm15, %zmm14 {%k1}
3992 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
3993 ; AVX512DQ-BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
3994 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm8, %zmm9
3995 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
3996 ; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm7
3997 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm9, %zmm7 {%k2}
3998 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
3999 ; AVX512DQ-BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
4000 ; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm9
4001 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
4002 ; AVX512DQ-BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
4003 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm9, %zmm15
4004 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
4005 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm9 = [19,26,1,8,15,0,0,0]
4006 ; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm9
4007 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
4008 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
4009 ; AVX512DQ-BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
4010 ; AVX512DQ-BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
4011 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
4012 ; AVX512DQ-BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
4013 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
4014 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm15, %zmm9 {%k1}
4015 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
4016 ; AVX512DQ-BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
4017 ; AVX512DQ-BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
4018 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
4019 ; AVX512DQ-BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
4020 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
4021 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm10 = [4,11,18,25]
4022 ; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm10
4023 ; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm10, %zmm11, %zmm10
4024 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm15, %zmm10 {%k1}
4025 ; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
4026 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
4027 ; AVX512DQ-BW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
4028 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm6, %zmm11
4029 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,12,19,26]
4030 ; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm6
4031 ; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm6, %zmm12, %zmm6
4032 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm11, %zmm6 {%k1}
4033 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm8, %zmm4
4034 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
4035 ; AVX512DQ-BW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
4036 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm4, %zmm5
4037 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm1 = [6,13,20,27]
4038 ; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm0, %zmm1
4039 ; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm1, %zmm13, %zmm0
4040 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm5, %zmm0 {%k1}
4041 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, (%rsi)
4042 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, (%rdx)
4043 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, (%rcx)
4044 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, (%r8)
4045 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, (%r9)
4046 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, (%r10)
4047 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, (%rax)
4048 ; AVX512DQ-BW-NEXT: vzeroupper
4049 ; AVX512DQ-BW-NEXT: retq
4051 ; AVX512DQ-BW-FCP-LABEL: load_i32_stride7_vf16:
4052 ; AVX512DQ-BW-FCP: # %bb.0:
4053 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
4054 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
4055 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm1
4056 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm5
4057 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm4
4058 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
4059 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm2
4060 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm8
4061 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm7
4062 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
4063 ; AVX512DQ-BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
4064 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm8, %zmm7, %zmm6
4065 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,7,14,21,28,0,0,0]
4066 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
4067 ; AVX512DQ-BW-FCP-NEXT: movw $992, %di # imm = 0x3E0
4068 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k1
4069 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm6, %zmm3 {%k1}
4070 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
4071 ; AVX512DQ-BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
4072 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
4073 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
4074 ; AVX512DQ-BW-FCP-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
4075 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm6, %zmm9
4076 ; AVX512DQ-BW-FCP-NEXT: movb $-32, %dil
4077 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k1
4078 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
4079 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
4080 ; AVX512DQ-BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
4081 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm9
4082 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
4083 ; AVX512DQ-BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
4084 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm10
4085 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
4086 ; AVX512DQ-BW-FCP-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
4087 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm11
4088 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
4089 ; AVX512DQ-BW-FCP-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
4090 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm7, %zmm8, %zmm12
4091 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
4092 ; AVX512DQ-BW-FCP-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
4093 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm8, %zmm7, %zmm13
4094 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm6, %zmm8
4095 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm14 = [1,8,15,22,29,0,0,0]
4096 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm14
4097 ; AVX512DQ-BW-FCP-NEXT: movw $480, %di # imm = 0x1E0
4098 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k2
4099 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm8, %zmm14 {%k2}
4100 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
4101 ; AVX512DQ-BW-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
4102 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm8
4103 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
4104 ; AVX512DQ-BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
4105 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm8, %zmm15
4106 ; AVX512DQ-BW-FCP-NEXT: movw $-512, %di # imm = 0xFE00
4107 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k1
4108 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm15, %zmm14 {%k1}
4109 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
4110 ; AVX512DQ-BW-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
4111 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm8, %zmm9
4112 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [18,25,0,7,14,0,0,0]
4113 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm2, %zmm7
4114 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm9, %zmm7 {%k2}
4115 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
4116 ; AVX512DQ-BW-FCP-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
4117 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm9
4118 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
4119 ; AVX512DQ-BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
4120 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm9, %zmm15
4121 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
4122 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm9 = [19,26,1,8,15,0,0,0]
4123 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm2, %zmm9
4124 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
4125 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
4126 ; AVX512DQ-BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
4127 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
4128 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
4129 ; AVX512DQ-BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
4130 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
4131 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm15, %zmm9 {%k1}
4132 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
4133 ; AVX512DQ-BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
4134 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm4, %zmm5, %zmm10
4135 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
4136 ; AVX512DQ-BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
4137 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm10, %zmm15
4138 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm10 = [4,11,18,25]
4139 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm10
4140 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm10, %zmm11, %zmm10
4141 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm15, %zmm10 {%k1}
4142 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
4143 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
4144 ; AVX512DQ-BW-FCP-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
4145 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm6, %zmm11
4146 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,12,19,26]
4147 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm6
4148 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm6, %zmm12, %zmm6
4149 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm11, %zmm6 {%k1}
4150 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm8, %zmm4
4151 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
4152 ; AVX512DQ-BW-FCP-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
4153 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm4, %zmm5
4154 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [6,13,20,27]
4155 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm0, %zmm1
4156 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm1, %zmm13, %zmm0
4157 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm5, %zmm0 {%k1}
4158 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, (%rsi)
4159 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
4160 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, (%rcx)
4161 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, (%r8)
4162 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, (%r9)
4163 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, (%r10)
4164 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
4165 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
4166 ; AVX512DQ-BW-FCP-NEXT: retq
4167 %wide.vec = load <112 x i32>, ptr %in.vec, align 64
4168 %strided.vec0 = shufflevector <112 x i32> %wide.vec, <112 x i32> poison, <16 x i32> <i32 0, i32 7, i32 14, i32 21, i32 28, i32 35, i32 42, i32 49, i32 56, i32 63, i32 70, i32 77, i32 84, i32 91, i32 98, i32 105>
4169 %strided.vec1 = shufflevector <112 x i32> %wide.vec, <112 x i32> poison, <16 x i32> <i32 1, i32 8, i32 15, i32 22, i32 29, i32 36, i32 43, i32 50, i32 57, i32 64, i32 71, i32 78, i32 85, i32 92, i32 99, i32 106>
4170 %strided.vec2 = shufflevector <112 x i32> %wide.vec, <112 x i32> poison, <16 x i32> <i32 2, i32 9, i32 16, i32 23, i32 30, i32 37, i32 44, i32 51, i32 58, i32 65, i32 72, i32 79, i32 86, i32 93, i32 100, i32 107>
4171 %strided.vec3 = shufflevector <112 x i32> %wide.vec, <112 x i32> poison, <16 x i32> <i32 3, i32 10, i32 17, i32 24, i32 31, i32 38, i32 45, i32 52, i32 59, i32 66, i32 73, i32 80, i32 87, i32 94, i32 101, i32 108>
4172 %strided.vec4 = shufflevector <112 x i32> %wide.vec, <112 x i32> poison, <16 x i32> <i32 4, i32 11, i32 18, i32 25, i32 32, i32 39, i32 46, i32 53, i32 60, i32 67, i32 74, i32 81, i32 88, i32 95, i32 102, i32 109>
4173 %strided.vec5 = shufflevector <112 x i32> %wide.vec, <112 x i32> poison, <16 x i32> <i32 5, i32 12, i32 19, i32 26, i32 33, i32 40, i32 47, i32 54, i32 61, i32 68, i32 75, i32 82, i32 89, i32 96, i32 103, i32 110>
4174 %strided.vec6 = shufflevector <112 x i32> %wide.vec, <112 x i32> poison, <16 x i32> <i32 6, i32 13, i32 20, i32 27, i32 34, i32 41, i32 48, i32 55, i32 62, i32 69, i32 76, i32 83, i32 90, i32 97, i32 104, i32 111>
4175 store <16 x i32> %strided.vec0, ptr %out.vec0, align 64
4176 store <16 x i32> %strided.vec1, ptr %out.vec1, align 64
4177 store <16 x i32> %strided.vec2, ptr %out.vec2, align 64
4178 store <16 x i32> %strided.vec3, ptr %out.vec3, align 64
4179 store <16 x i32> %strided.vec4, ptr %out.vec4, align 64
4180 store <16 x i32> %strided.vec5, ptr %out.vec5, align 64
4181 store <16 x i32> %strided.vec6, ptr %out.vec6, align 64
4182 ret void
4183 }
4185 define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6) nounwind {
4186 ; SSE-LABEL: load_i32_stride7_vf32:
4187 ; SSE: # %bb.0:
4188 ; SSE-NEXT: subq $1160, %rsp # imm = 0x488
4189 ; SSE-NEXT: movdqa 80(%rdi), %xmm8
4190 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4191 ; SSE-NEXT: movdqa (%rdi), %xmm13
4192 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4193 ; SSE-NEXT: movdqa 16(%rdi), %xmm6
4194 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4195 ; SSE-NEXT: movdqa 48(%rdi), %xmm5
4196 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4197 ; SSE-NEXT: movdqa 640(%rdi), %xmm3
4198 ; SSE-NEXT: movdqa 608(%rdi), %xmm4
4199 ; SSE-NEXT: movdqa 560(%rdi), %xmm10
4200 ; SSE-NEXT: movdqa 576(%rdi), %xmm1
4201 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4202 ; SSE-NEXT: movdqa 192(%rdi), %xmm14
4203 ; SSE-NEXT: movdqa 160(%rdi), %xmm12
4204 ; SSE-NEXT: movdqa 112(%rdi), %xmm2
4205 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4206 ; SSE-NEXT: movdqa 128(%rdi), %xmm0
4207 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4208 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
4209 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4210 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,3,3]
4211 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4212 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
4213 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
4214 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4215 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,3,3,3]
4216 ; SSE-NEXT: movdqa %xmm10, %xmm2
4217 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4218 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4219 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,3,3]
4220 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4221 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
4222 ; SSE-NEXT: movdqa %xmm3, %xmm7
4223 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4224 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
4225 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4226 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,3,3,3]
4227 ; SSE-NEXT: movdqa %xmm13, %xmm2
4228 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4229 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
4230 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
4231 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
4232 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4233 ; SSE-NEXT: movdqa 448(%rdi), %xmm2
4234 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4235 ; SSE-NEXT: movdqa 464(%rdi), %xmm0
4236 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4237 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
4238 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4239 ; SSE-NEXT: movdqa 528(%rdi), %xmm9
4240 ; SSE-NEXT: movdqa 496(%rdi), %xmm13
4241 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
4242 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4243 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
4244 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4245 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
4246 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4247 ; SSE-NEXT: movdqa 336(%rdi), %xmm2
4248 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4249 ; SSE-NEXT: movdqa 352(%rdi), %xmm0
4250 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4251 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
4252 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4253 ; SSE-NEXT: movdqa 416(%rdi), %xmm3
4254 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4255 ; SSE-NEXT: movdqa 384(%rdi), %xmm11
4256 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,3,3]
4257 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4258 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
4259 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
4260 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4261 ; SSE-NEXT: movdqa 784(%rdi), %xmm2
4262 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4263 ; SSE-NEXT: movdqa 800(%rdi), %xmm0
4264 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4265 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
4266 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4267 ; SSE-NEXT: movdqa 864(%rdi), %xmm8
4268 ; SSE-NEXT: movdqa 832(%rdi), %xmm15
4269 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,2,3,3]
4270 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4271 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
4272 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4273 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
4274 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4275 ; SSE-NEXT: movdqa 224(%rdi), %xmm3
4276 ; SSE-NEXT: movdqa %xmm3, (%rsp) # 16-byte Spill
4277 ; SSE-NEXT: movdqa 240(%rdi), %xmm0
4278 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4279 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
4280 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4281 ; SSE-NEXT: movdqa 304(%rdi), %xmm1
4282 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4283 ; SSE-NEXT: movdqa 272(%rdi), %xmm6
4284 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,2,3,3]
4285 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4286 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
4287 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
4288 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4289 ; SSE-NEXT: movdqa 672(%rdi), %xmm3
4290 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4291 ; SSE-NEXT: movdqa 688(%rdi), %xmm0
4292 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4293 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
4294 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4295 ; SSE-NEXT: movdqa 752(%rdi), %xmm1
4296 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4297 ; SSE-NEXT: movdqa 720(%rdi), %xmm0
4298 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4299 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4300 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
4301 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
4302 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4303 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4304 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,2,2]
4305 ; SSE-NEXT: movdqa %xmm12, %xmm3
4306 ; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
4307 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4308 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
4309 ; SSE-NEXT: movdqa 144(%rdi), %xmm1
4310 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4311 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
4312 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
4313 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4314 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,2,2]
4315 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
4316 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
4317 ; SSE-NEXT: movdqa 592(%rdi), %xmm1
4318 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4319 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
4320 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
4321 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4322 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
4323 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,2,2,2]
4324 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4325 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
4326 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
4327 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
4328 ; SSE-NEXT: movdqa 32(%rdi), %xmm4
4329 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4330 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
4331 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
4332 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4333 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,2,2]
4334 ; SSE-NEXT: punpckhdq {{.*#+}} xmm13 = xmm13[2],xmm0[2],xmm13[3],xmm0[3]
4335 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4336 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
4337 ; SSE-NEXT: movdqa 480(%rdi), %xmm4
4338 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4339 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
4340 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
4341 ; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4342 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4343 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,2,2]
4344 ; SSE-NEXT: movdqa %xmm11, %xmm4
4345 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
4346 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4347 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
4348 ; SSE-NEXT: movdqa 368(%rdi), %xmm11
4349 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
4350 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4351 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
4352 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4353 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,2,2,2]
4354 ; SSE-NEXT: movdqa %xmm15, %xmm4
4355 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
4356 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4357 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[1,1,1,1]
4358 ; SSE-NEXT: movdqa 816(%rdi), %xmm7
4359 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4360 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
4361 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
4362 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4363 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4364 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,2,2]
4365 ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4366 ; SSE-NEXT: movdqa (%rsp), %xmm9 # 16-byte Reload
4367 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm9[1,1,1,1]
4368 ; SSE-NEXT: movdqa 256(%rdi), %xmm13
4369 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1]
4370 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm7[0],xmm6[1]
4371 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4372 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
4373 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm12[2,2,2,2]
4374 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4375 ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
4376 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
4377 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
4378 ; SSE-NEXT: movdqa 704(%rdi), %xmm0
4379 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4380 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
4381 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm7[0],xmm6[1]
4382 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4383 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,2,3]
4384 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
4385 ; SSE-NEXT: # xmm8 = mem[1,1,1,1]
4386 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
4387 ; SSE-NEXT: movdqa 176(%rdi), %xmm0
4388 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4389 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm14[2,3,2,3]
4390 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,0,1,1]
4391 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
4392 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm7[0],xmm6[1]
4393 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4394 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[2,3,2,3]
4395 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4396 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm0[1,1,1,1]
4397 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
4398 ; SSE-NEXT: movdqa 64(%rdi), %xmm3
4399 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm10[2,3,2,3]
4400 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,1,1]
4401 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
4402 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
4403 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4404 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,2,3]
4405 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm11[1,1,1,1]
4406 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
4407 ; SSE-NEXT: movdqa 400(%rdi), %xmm1
4408 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4409 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm5[2,3,2,3]
4410 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
4411 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
4412 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm7[0],xmm1[1]
4413 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4414 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm9[2,3,2,3]
4415 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm13[1,1,1,1]
4416 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
4417 ; SSE-NEXT: movdqa 288(%rdi), %xmm10
4418 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[2,3,2,3]
4419 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,0,1,1]
4420 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4421 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
4422 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm7[0],xmm1[1]
4423 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4424 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
4425 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
4426 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
4427 ; SSE-NEXT: # xmm8 = mem[1,1,1,1]
4428 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
4429 ; SSE-NEXT: movdqa 624(%rdi), %xmm1
4430 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4431 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
4432 ; SSE-NEXT: # xmm8 = mem[2,3,2,3]
4433 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
4434 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
4435 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm7[0],xmm1[1]
4436 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4437 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
4438 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm9[2,3,2,3]
4439 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
4440 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm14[1,1,1,1]
4441 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
4442 ; SSE-NEXT: movdqa 512(%rdi), %xmm1
4443 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4444 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
4445 ; SSE-NEXT: # xmm8 = mem[2,3,2,3]
4446 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
4447 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
4448 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm7[0],xmm1[1]
4449 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4450 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm15[2,3,2,3]
4451 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
4452 ; SSE-NEXT: # xmm8 = mem[1,1,1,1]
4453 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
4454 ; SSE-NEXT: movdqa 848(%rdi), %xmm1
4455 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4456 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
4457 ; SSE-NEXT: # xmm8 = mem[2,3,2,3]
4458 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
4459 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
4460 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm7[0],xmm1[1]
4461 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4462 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4463 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
4464 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
4465 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm11[1,1,1,1]
4466 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
4467 ; SSE-NEXT: movdqa 736(%rdi), %xmm2
4468 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm12[2,3,2,3]
4469 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,1,1]
4470 ; SSE-NEXT: movdqa %xmm2, %xmm12
4471 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4472 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
4473 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm7[0],xmm1[1]
4474 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4475 ; SSE-NEXT: movdqa 96(%rdi), %xmm1
4476 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4477 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,0,1,1]
4478 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4479 ; SSE-NEXT: movdqa %xmm3, %xmm1
4480 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
4481 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
4482 ; SSE-NEXT: # xmm5 = mem[2,2,3,3]
4483 ; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm0[2],xmm5[3],xmm0[3]
4484 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm5[0],xmm1[1]
4485 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4486 ; SSE-NEXT: movdqa 208(%rdi), %xmm0
4487 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4488 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,1,1]
4489 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4490 ; SSE-NEXT: movdqa %xmm4, %xmm2
4491 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
4492 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4493 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
4494 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4495 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm8[2],xmm1[3],xmm8[3]
4496 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4497 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4498 ; SSE-NEXT: movdqa 320(%rdi), %xmm0
4499 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4500 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
4501 ; SSE-NEXT: movdqa %xmm10, %xmm2
4502 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4503 ; SSE-NEXT: pshufd $250, (%rsp), %xmm1 # 16-byte Folded Reload
4504 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
4505 ; SSE-NEXT: movdqa %xmm13, %xmm10
4506 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm13[2],xmm1[3],xmm13[3]
4507 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4508 ; SSE-NEXT: movapd %xmm2, (%rsp) # 16-byte Spill
4509 ; SSE-NEXT: movdqa 432(%rdi), %xmm0
4510 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4511 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
4512 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
4513 ; SSE-NEXT: movdqa %xmm13, %xmm2
4514 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4515 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4516 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
4517 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4518 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm15[2],xmm1[3],xmm15[3]
4519 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4520 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4521 ; SSE-NEXT: movdqa 544(%rdi), %xmm0
4522 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4523 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
4524 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4525 ; SSE-NEXT: movdqa %xmm5, %xmm2
4526 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4527 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[2,2,3,3]
4528 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm14[2],xmm1[3],xmm14[3]
4529 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4530 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4531 ; SSE-NEXT: movdqa 656(%rdi), %xmm0
4532 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4533 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
4534 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
4535 ; SSE-NEXT: movdqa %xmm7, %xmm2
4536 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4537 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4538 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
4539 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
4540 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm14[2],xmm1[3],xmm14[3]
4541 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4542 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4543 ; SSE-NEXT: movdqa 768(%rdi), %xmm0
4544 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4545 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
4546 ; SSE-NEXT: movdqa %xmm12, %xmm2
4547 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4548 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,2,3,3]
4549 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3]
4550 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4551 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4552 ; SSE-NEXT: movdqa 880(%rdi), %xmm0
4553 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4554 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
4555 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
4556 ; SSE-NEXT: movdqa %xmm11, %xmm0
4557 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
4558 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4559 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
4560 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
4561 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm12[2],xmm1[3],xmm12[3]
4562 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
4563 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4564 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4565 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
4566 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4567 ; SSE-NEXT: movdqa %xmm6, %xmm2
4568 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4569 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
4570 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
4571 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
4572 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
4573 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4574 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[3,3,3,3]
4575 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4576 ; SSE-NEXT: movdqa %xmm8, %xmm2
4577 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4578 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,3,3]
4579 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4580 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
4581 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
4582 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4583 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[3,3,3,3]
4584 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
4585 ; SSE-NEXT: movdqa %xmm9, %xmm1
4586 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4587 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4588 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
4589 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4590 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
4591 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
4592 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4593 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[3,3,3,3]
4594 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
4595 ; SSE-NEXT: movdqa %xmm10, %xmm1
4596 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4597 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
4598 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4599 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
4600 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
4601 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4602 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4603 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
4604 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
4605 ; SSE-NEXT: movdqa %xmm13, %xmm1
4606 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4607 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
4608 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4609 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
4610 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
4611 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4612 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[3,3,3,3]
4613 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
4614 ; SSE-NEXT: movdqa %xmm14, %xmm1
4615 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4616 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,3,3]
4617 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4618 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
4619 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
4620 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4621 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4622 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
4623 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4624 ; SSE-NEXT: movdqa %xmm15, %xmm1
4625 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4626 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4627 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
4628 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4629 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
4630 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
4631 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4632 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[3,3,3,3]
4633 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4634 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4635 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,3,3]
4636 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
4637 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
4638 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
4639 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4640 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,2,2]
4641 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4642 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
4643 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
4644 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4645 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
4646 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
4647 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4648 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,2,2]
4649 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4650 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
4651 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
4652 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4653 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
4654 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
4655 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4656 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,2,2]
4657 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4658 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
4659 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
4660 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4661 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
4662 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
4663 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4664 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4665 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
4666 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4667 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
4668 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
4669 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
4670 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
4671 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
4672 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4673 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,2,2]
4674 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4675 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
4676 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
4677 ; SSE-NEXT: movdqa %xmm13, %xmm5
4678 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4679 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
4680 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
4681 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4682 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
4683 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,2,2]
4684 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4685 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
4686 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[1,1,1,1]
4687 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
4688 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
4689 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
4690 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4691 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
4692 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,2,2]
4693 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4694 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
4695 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[1,1,1,1]
4696 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
4697 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1]
4698 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
4699 ; SSE-NEXT: movapd %xmm1, %xmm15
4700 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,2,2]
4701 ; SSE-NEXT: movdqa %xmm11, %xmm1
4702 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
4703 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4704 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
4705 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
4706 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
4707 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
4708 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4709 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4710 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
4711 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4712 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
4713 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4714 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4715 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
4716 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
4717 ; SSE-NEXT: # xmm9 = mem[0,0,1,1]
4718 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
4719 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm1[0],xmm9[1]
4720 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
4721 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,3,2,3]
4722 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4723 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4724 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
4725 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
4726 ; SSE-NEXT: # xmm8 = mem[0,0,1,1]
4727 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
4728 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
4729 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
4730 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4731 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
4732 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4733 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4734 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
4735 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
4736 ; SSE-NEXT: # xmm7 = mem[0,0,1,1]
4737 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
4738 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1]
4739 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
4740 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4741 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
4742 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4743 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4744 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
4745 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
4746 ; SSE-NEXT: # xmm6 = mem[0,0,1,1]
4747 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
4748 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm1[0],xmm6[1]
4749 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
4750 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
4751 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4752 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4753 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
4754 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
4755 ; SSE-NEXT: # xmm5 = mem[0,0,1,1]
4756 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4757 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1]
4758 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
4759 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4760 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
4761 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4762 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,3,2,3]
4763 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
4764 ; SSE-NEXT: # xmm4 = mem[0,0,1,1]
4765 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
4766 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
4767 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[1,1,1,1]
4768 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4769 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
4770 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
4771 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,3,2,3]
4772 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
4773 ; SSE-NEXT: # xmm3 = mem[0,0,1,1]
4774 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
4775 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
4776 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,1,1,1]
4777 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
4778 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
4779 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4780 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
4781 ; SSE-NEXT: # xmm2 = mem[2,3,2,3]
4782 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4783 ; SSE-NEXT: # xmm0 = mem[0,0,1,1]
4784 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
4785 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
4786 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4787 ; SSE-NEXT: movaps %xmm1, 96(%rsi)
4788 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4789 ; SSE-NEXT: movaps %xmm1, 32(%rsi)
4790 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4791 ; SSE-NEXT: movaps %xmm1, 112(%rsi)
4792 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4793 ; SSE-NEXT: movaps %xmm1, 48(%rsi)
4794 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4795 ; SSE-NEXT: movaps %xmm1, 64(%rsi)
4796 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4797 ; SSE-NEXT: movaps %xmm1, (%rsi)
4798 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4799 ; SSE-NEXT: movaps %xmm1, 80(%rsi)
4800 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4801 ; SSE-NEXT: movaps %xmm1, 16(%rsi)
4802 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4803 ; SSE-NEXT: movaps %xmm1, 96(%rdx)
4804 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4805 ; SSE-NEXT: movaps %xmm1, 32(%rdx)
4806 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4807 ; SSE-NEXT: movaps %xmm1, 112(%rdx)
4808 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4809 ; SSE-NEXT: movaps %xmm1, 48(%rdx)
4810 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4811 ; SSE-NEXT: movaps %xmm1, 64(%rdx)
4812 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4813 ; SSE-NEXT: movaps %xmm1, (%rdx)
4814 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4815 ; SSE-NEXT: movaps %xmm1, 80(%rdx)
4816 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4817 ; SSE-NEXT: movaps %xmm1, 16(%rdx)
4818 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4819 ; SSE-NEXT: movaps %xmm1, 96(%rcx)
4820 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4821 ; SSE-NEXT: movaps %xmm1, 112(%rcx)
4822 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4823 ; SSE-NEXT: movaps %xmm1, 64(%rcx)
4824 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4825 ; SSE-NEXT: movaps %xmm1, 80(%rcx)
4826 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4827 ; SSE-NEXT: movaps %xmm1, 32(%rcx)
4828 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4829 ; SSE-NEXT: movaps %xmm1, 48(%rcx)
4830 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4831 ; SSE-NEXT: movaps %xmm1, (%rcx)
4832 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4833 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
4834 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4835 ; SSE-NEXT: movaps %xmm1, 112(%r8)
4836 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4837 ; SSE-NEXT: movaps %xmm1, 96(%r8)
4838 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4839 ; SSE-NEXT: movaps %xmm1, 80(%r8)
4840 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4841 ; SSE-NEXT: movaps %xmm1, 64(%r8)
4842 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4843 ; SSE-NEXT: movaps %xmm1, 48(%r8)
4844 ; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
4845 ; SSE-NEXT: movaps %xmm1, 32(%r8)
4846 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4847 ; SSE-NEXT: movaps %xmm1, 16(%r8)
4848 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4849 ; SSE-NEXT: movaps %xmm1, (%r8)
4850 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4851 ; SSE-NEXT: movaps %xmm1, 112(%r9)
4852 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4853 ; SSE-NEXT: movaps %xmm1, 96(%r9)
4854 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4855 ; SSE-NEXT: movaps %xmm1, 80(%r9)
4856 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4857 ; SSE-NEXT: movaps %xmm1, 64(%r9)
4858 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4859 ; SSE-NEXT: movaps %xmm1, 48(%r9)
4860 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4861 ; SSE-NEXT: movaps %xmm1, 32(%r9)
4862 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4863 ; SSE-NEXT: movaps %xmm1, 16(%r9)
4864 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4865 ; SSE-NEXT: movaps %xmm1, (%r9)
4866 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
4867 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4868 ; SSE-NEXT: movaps %xmm1, 112(%rax)
4869 ; SSE-NEXT: movapd %xmm15, 96(%rax)
4870 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4871 ; SSE-NEXT: movaps %xmm1, 80(%rax)
4872 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4873 ; SSE-NEXT: movaps %xmm1, 64(%rax)
4874 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4875 ; SSE-NEXT: movaps %xmm1, 48(%rax)
4876 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4877 ; SSE-NEXT: movaps %xmm1, 32(%rax)
4878 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4879 ; SSE-NEXT: movaps %xmm1, 16(%rax)
4880 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4881 ; SSE-NEXT: movaps %xmm1, (%rax)
4882 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
4883 ; SSE-NEXT: movapd %xmm0, 112(%rax)
4884 ; SSE-NEXT: movapd %xmm3, 96(%rax)
4885 ; SSE-NEXT: movapd %xmm4, 80(%rax)
4886 ; SSE-NEXT: movapd %xmm5, 64(%rax)
4887 ; SSE-NEXT: movapd %xmm6, 48(%rax)
4888 ; SSE-NEXT: movapd %xmm7, 32(%rax)
4889 ; SSE-NEXT: movapd %xmm8, 16(%rax)
4890 ; SSE-NEXT: movapd %xmm9, (%rax)
4891 ; SSE-NEXT: addq $1160, %rsp # imm = 0x488
4892 ; SSE-NEXT: retq
4893 ;
4894 ; AVX-LABEL: load_i32_stride7_vf32:
4895 ; AVX: # %bb.0:
4896 ; AVX-NEXT: subq $1464, %rsp # imm = 0x5B8
4897 ; AVX-NEXT: vmovaps 480(%rdi), %ymm4
4898 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4899 ; AVX-NEXT: vmovaps 448(%rdi), %ymm3
4900 ; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4901 ; AVX-NEXT: vmovaps 544(%rdi), %ymm7
4902 ; AVX-NEXT: vmovaps 32(%rdi), %ymm2
4903 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4904 ; AVX-NEXT: vmovaps (%rdi), %ymm1
4905 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4906 ; AVX-NEXT: vmovaps 96(%rdi), %ymm6
4907 ; AVX-NEXT: vmovaps 80(%rdi), %xmm0
4908 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4909 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[2],ymm0[2]
4910 ; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4911 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
4912 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
4913 ; AVX-NEXT: vmovaps (%rdi), %xmm9
4914 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm9[0,1],xmm1[2,3]
4915 ; AVX-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4916 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
4917 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
4918 ; AVX-NEXT: vmovaps 160(%rdi), %xmm2
4919 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4920 ; AVX-NEXT: vmovaps 128(%rdi), %xmm1
4921 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4922 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
4923 ; AVX-NEXT: vmovaps 192(%rdi), %xmm10
4924 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm10[1]
4925 ; AVX-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4926 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4927 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
4928 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4929 ; AVX-NEXT: vmovaps 528(%rdi), %xmm0
4930 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4931 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[2],ymm0[2]
4932 ; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4933 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm4[6],ymm3[7]
4934 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
4935 ; AVX-NEXT: vmovaps 448(%rdi), %xmm8
4936 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1],xmm1[2,3]
4937 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
4938 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
4939 ; AVX-NEXT: vmovaps 608(%rdi), %xmm2
4940 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4941 ; AVX-NEXT: vmovaps 576(%rdi), %xmm1
4942 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4943 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
4944 ; AVX-NEXT: vmovaps 640(%rdi), %xmm4
4945 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm4[1]
4946 ; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4947 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4948 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
4949 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4950 ; AVX-NEXT: vmovaps 256(%rdi), %ymm1
4951 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4952 ; AVX-NEXT: vmovaps 224(%rdi), %ymm0
4953 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4954 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
4955 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
4956 ; AVX-NEXT: vmovaps 224(%rdi), %xmm5
4957 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm5[0,1],xmm0[2,3]
4958 ; AVX-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4959 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
4960 ; AVX-NEXT: vmovaps 320(%rdi), %ymm13
4961 ; AVX-NEXT: vmovaps 304(%rdi), %xmm1
4962 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4963 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm1[0],ymm13[2],ymm1[2]
4964 ; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4965 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4966 ; AVX-NEXT: vmovaps 384(%rdi), %xmm2
4967 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4968 ; AVX-NEXT: vmovaps 352(%rdi), %xmm1
4969 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4970 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
4971 ; AVX-NEXT: vmovaps 416(%rdi), %xmm11
4972 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm11[1]
4973 ; AVX-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4974 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4975 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
4976 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4977 ; AVX-NEXT: vmovaps 704(%rdi), %ymm1
4978 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4979 ; AVX-NEXT: vmovaps 672(%rdi), %ymm0
4980 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4981 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
4982 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
4983 ; AVX-NEXT: vmovaps 672(%rdi), %xmm1
4984 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4985 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
4986 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
4987 ; AVX-NEXT: vmovaps 768(%rdi), %ymm3
4988 ; AVX-NEXT: vmovaps 752(%rdi), %xmm1
4989 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4990 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm3[0],ymm1[0],ymm3[2],ymm1[2]
4991 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4992 ; AVX-NEXT: vmovaps 832(%rdi), %xmm2
4993 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4994 ; AVX-NEXT: vmovaps 800(%rdi), %xmm1
4995 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4996 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
4997 ; AVX-NEXT: vmovaps 864(%rdi), %xmm2
4998 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4999 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
5000 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5001 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
5002 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5003 ; AVX-NEXT: vmovaps 64(%rdi), %ymm0
5004 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5005 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm6[1,1],ymm0[2,2],ymm6[5,5],ymm0[6,6]
5006 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5007 ; AVX-NEXT: vmovaps 32(%rdi), %xmm1
5008 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5009 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm9[1],xmm1[2,3]
5010 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
5011 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
5012 ; AVX-NEXT: vmovaps 160(%rdi), %ymm1
5013 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5014 ; AVX-NEXT: vmovaps 128(%rdi), %ymm2
5015 ; AVX-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
5016 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
5017 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,0],ymm1[3,3],ymm2[4,4],ymm1[7,7]
5018 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
5019 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm10[2]
5020 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5021 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
5022 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5023 ; AVX-NEXT: vmovaps 512(%rdi), %ymm0
5024 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5025 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1],ymm0[2,2],ymm7[5,5],ymm0[6,6]
5026 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5027 ; AVX-NEXT: vmovaps 480(%rdi), %xmm10
5028 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0],xmm8[1],xmm10[2,3]
5029 ; AVX-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5030 ; AVX-NEXT: vmovaps %xmm8, %xmm7
5031 ; AVX-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5032 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
5033 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6,7]
5034 ; AVX-NEXT: vmovaps 608(%rdi), %ymm14
5035 ; AVX-NEXT: vmovaps 576(%rdi), %ymm12
5036 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm12[2,3],ymm14[0,1]
5037 ; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5038 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,0],ymm2[3,3],ymm12[4,4],ymm2[7,7]
5039 ; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
5040 ; AVX-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1,2],xmm4[2]
5041 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
5042 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm2[5,6,7]
5043 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5044 ; AVX-NEXT: vmovaps 288(%rdi), %ymm0
5045 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5046 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm13[1,1],ymm0[2,2],ymm13[5,5],ymm0[6,6]
5047 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
5048 ; AVX-NEXT: vmovaps 256(%rdi), %xmm0
5049 ; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5050 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0],xmm5[1],xmm0[2,3]
5051 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],mem[3,3]
5052 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
5053 ; AVX-NEXT: vmovaps 384(%rdi), %ymm0
5054 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5055 ; AVX-NEXT: vmovaps 352(%rdi), %ymm2
5056 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5057 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm2[2,3],ymm0[0,1]
5058 ; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm2[0,0],ymm5[3,3],ymm2[4,4],ymm5[7,7]
5059 ; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
5060 ; AVX-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm5[1,2],xmm11[2]
5061 ; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
5062 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm5[5,6,7]
5063 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5064 ; AVX-NEXT: vmovaps 736(%rdi), %ymm8
5065 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[1,1],ymm8[2,2],ymm3[5,5],ymm8[6,6]
5066 ; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5067 ; AVX-NEXT: vmovaps %ymm3, %ymm9
5068 ; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5069 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
5070 ; AVX-NEXT: vmovaps 704(%rdi), %xmm6
5071 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5072 ; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm6[0],xmm2[1],xmm6[2,3]
5073 ; AVX-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5074 ; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[1,0],mem[3,3]
5075 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2],ymm1[3,4,5,6,7]
5076 ; AVX-NEXT: vmovaps 832(%rdi), %ymm5
5077 ; AVX-NEXT: vmovaps 800(%rdi), %ymm15
5078 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm15[2,3],ymm5[0,1]
5079 ; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5080 ; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm15[0,0],ymm13[3,3],ymm15[4,4],ymm13[7,7]
5081 ; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
5082 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
5083 ; AVX-NEXT: vinsertps {{.*#+}} xmm13 = zero,xmm13[1,2],xmm4[2]
5084 ; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
5085 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
5086 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5087 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5088 ; AVX-NEXT: # xmm0 = mem[2,3,2,3]
5089 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5090 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
5091 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
5092 ; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
5093 ; AVX-NEXT: # ymm13 = ymm13[3,1],mem[0,3],ymm13[7,5],mem[4,7]
5094 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
5095 ; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm11[2,1],ymm13[2,0],ymm11[6,5],ymm13[6,4]
5096 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3,4,5,6,7]
5097 ; AVX-NEXT: vmovups (%rsp), %ymm3 # 32-byte Reload
5098 ; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm13 # 32-byte Folded Reload
5099 ; AVX-NEXT: # ymm13 = ymm3[0],mem[0],ymm3[2],mem[2]
5100 ; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
5101 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
5102 ; AVX-NEXT: # xmm13 = xmm13[0,1,2],mem[3]
5103 ; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
5104 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
5105 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5106 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm7[2,3,2,3]
5107 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm10[1],xmm0[2,3]
5108 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
5109 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
5110 ; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[3,1],ymm10[0,3],ymm13[7,5],ymm10[4,7]
5111 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
5112 ; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm7[2,1],ymm13[2,0],ymm7[6,5],ymm13[6,4]
5113 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3,4,5,6,7]
5114 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm12[0],ymm14[0],ymm12[2],ymm14[2]
5115 ; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
5116 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
5117 ; AVX-NEXT: # xmm13 = xmm13[0,1,2],mem[3]
5118 ; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
5119 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
5120 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5121 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[2,3,2,3]
5122 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2,3]
5123 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
5124 ; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[3,1],ymm8[0,3],ymm13[7,5],ymm8[4,7]
5125 ; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm9[2,1],ymm13[2,0],ymm9[6,5],ymm13[6,4]
5126 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3,4,5,6,7]
5127 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm15[0],ymm5[0],ymm15[2],ymm5[2]
5128 ; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
5129 ; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm13[0,1,2],xmm4[3]
5130 ; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
5131 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
5132 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5133 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5134 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm5[2,3,2,3]
5135 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5136 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
5137 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
5138 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
5139 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[3,1],ymm14[0,3],ymm8[7,5],ymm14[4,7]
5140 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5141 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm4[2,1],ymm8[2,0],ymm4[6,5],ymm8[6,4]
5142 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3,4,5,6,7]
5143 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
5144 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5145 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm6[0],ymm13[0],ymm6[2],ymm13[2]
5146 ; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
5147 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
5148 ; AVX-NEXT: # xmm8 = xmm8[0,1,2],mem[3]
5149 ; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
5150 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
5151 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5152 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5153 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm11[0,0],ymm0[5,4],ymm11[4,4]
5154 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm11[3,1],ymm0[0,2],ymm11[7,5],ymm0[4,6]
5155 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm8 # 16-byte Folded Reload
5156 ; AVX-NEXT: # xmm8 = xmm1[0,1,2],mem[3]
5157 ; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm8[3,2,2,3]
5158 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3,4,5,6,7]
5159 ; AVX-NEXT: vmovaps 192(%rdi), %ymm1
5160 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5161 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
5162 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[0,1],ymm9[1,3],ymm1[4,5],ymm9[5,7]
5163 ; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm3[0,2],ymm8[2,0],ymm3[4,6],ymm8[6,4]
5164 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
5165 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5166 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[1,0],ymm7[0,0],ymm10[5,4],ymm7[4,4]
5167 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm7[3,1],ymm0[0,2],ymm7[7,5],ymm0[4,6]
5168 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
5169 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm8 # 16-byte Folded Reload
5170 ; AVX-NEXT: # xmm8 = xmm11[0,1,2],mem[3]
5171 ; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm8[3,2,2,3]
5172 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3,4,5,6,7]
5173 ; AVX-NEXT: vmovaps 640(%rdi), %ymm1
5174 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
5175 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,1],ymm8[1,3],ymm1[4,5],ymm8[5,7]
5176 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5177 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[0,2],ymm10[2,0],ymm12[4,6],ymm10[6,4]
5178 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm10[5,6,7]
5179 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5180 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm14[1,0],ymm4[0,0],ymm14[5,4],ymm4[4,4]
5181 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm4[3,1],ymm0[0,2],ymm4[7,5],ymm0[4,6]
5182 ; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm2[0,1,2],xmm5[3]
5183 ; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[3,2,2,3]
5184 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3,4,5,6,7]
5185 ; AVX-NEXT: vmovaps 416(%rdi), %ymm14
5186 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm14[0,1],ymm13[1,3],ymm14[4,5],ymm13[5,7]
5187 ; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5188 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm6[0,2],ymm10[2,0],ymm6[4,6],ymm10[6,4]
5189 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm10[5,6,7]
5190 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5191 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
5192 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5193 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm2[0,0],ymm0[5,4],ymm2[4,4]
5194 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,1],ymm0[0,2],ymm2[7,5],ymm0[4,6]
5195 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5196 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
5197 ; AVX-NEXT: # xmm5 = mem[0,1,2],xmm2[3]
5198 ; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,2,2,3]
5199 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3,4,5,6,7]
5200 ; AVX-NEXT: vmovaps 864(%rdi), %ymm5
5201 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
5202 ; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm7[1,3],ymm5[4,5],ymm7[5,7]
5203 ; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm15[0,2],ymm6[2,0],ymm15[4,6],ymm6[6,4]
5204 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5,6,7]
5205 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5206 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm12[2,3,0,1]
5207 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm12[3,0],ymm0[0,0],ymm12[7,4],ymm0[4,4]
5208 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm1[1,0],ymm8[2,0],ymm1[5,4],ymm8[6,4]
5209 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,0],ymm0[6,4],ymm3[6,4]
5210 ; AVX-NEXT: vmovaps 544(%rdi), %xmm1
5211 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5212 ; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm1[0,1,0,1]
5213 ; AVX-NEXT: vmovaps 512(%rdi), %xmm6
5214 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm6[0,1,2],xmm10[3]
5215 ; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm11[2,3,2,3]
5216 ; AVX-NEXT: vblendps {{.*#+}} xmm12 = mem[0],xmm12[1],mem[2,3]
5217 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm12[0,1],xmm10[2,3]
5218 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
5219 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5220 ; AVX-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
5221 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1]
5222 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,0],ymm0[0,0],ymm2[7,4],ymm0[4,4]
5223 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
5224 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm3[1,0],ymm9[2,0],ymm3[5,4],ymm9[6,4]
5225 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[2,0],ymm4[2,0],ymm0[6,4],ymm4[6,4]
5226 ; AVX-NEXT: vmovaps 64(%rdi), %xmm0
5227 ; AVX-NEXT: vmovaps 96(%rdi), %xmm4
5228 ; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm4[0,1,0,1]
5229 ; AVX-NEXT: vmovaps %xmm4, (%rsp) # 16-byte Spill
5230 ; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm0[0,1,2],xmm12[3]
5231 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
5232 ; AVX-NEXT: # xmm13 = mem[2,3,2,3]
5233 ; AVX-NEXT: vblendps {{.*#+}} xmm13 = mem[0],xmm13[1],mem[2,3]
5234 ; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm13[0,1],xmm12[2,3]
5235 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm10[4,5,6,7]
5236 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5237 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
5238 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm1[2,3,0,1]
5239 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[3,0],ymm10[0,0],ymm1[7,4],ymm10[4,4]
5240 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
5241 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm14[1,0],ymm11[2,0],ymm14[5,4],ymm11[6,4]
5242 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,0],ymm10[2,0],ymm2[6,4],ymm10[6,4]
5243 ; AVX-NEXT: vmovaps 320(%rdi), %xmm1
5244 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5245 ; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm1[0,1,0,1]
5246 ; AVX-NEXT: vmovaps 288(%rdi), %xmm2
5247 ; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm2[0,1,2],xmm13[3]
5248 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
5249 ; AVX-NEXT: # xmm14 = mem[2,3,2,3]
5250 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = mem[0],xmm14[1],mem[2,3]
5251 ; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm14[0,1],xmm13[2,3]
5252 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm12[4,5,6,7]
5253 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5254 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm15[2,3,0,1]
5255 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm15[3,0],ymm12[0,0],ymm15[7,4],ymm12[4,4]
5256 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm5[1,0],ymm7[2,0],ymm5[5,4],ymm7[6,4]
5257 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm1[2,0],ymm12[2,0],ymm1[6,4],ymm12[6,4]
5258 ; AVX-NEXT: vmovaps 768(%rdi), %xmm1
5259 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5260 ; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm1[0,1,0,1]
5261 ; AVX-NEXT: vmovaps 736(%rdi), %xmm13
5262 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm13[0,1,2],xmm14[3]
5263 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
5264 ; AVX-NEXT: # xmm15 = mem[2,3,2,3]
5265 ; AVX-NEXT: vblendps {{.*#+}} xmm15 = mem[0],xmm15[1],mem[2,3]
5266 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm15[0,1],xmm14[2,3]
5267 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm12[4,5,6,7]
5268 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5269 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm3[2,1],ymm9[3,3],ymm3[6,5],ymm9[7,7]
5270 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5271 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm15 # 16-byte Folded Reload
5272 ; AVX-NEXT: # xmm15 = xmm1[0],mem[1],xmm1[2,3]
5273 ; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
5274 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm15[1,0],ymm12[2,0],ymm15[5,4],ymm12[6,4]
5275 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[3]
5276 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
5277 ; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
5278 ; AVX-NEXT: # ymm15 = ymm1[0,0],mem[1,0],ymm1[4,4],mem[5,4]
5279 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
5280 ; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
5281 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm15[0,1],xmm0[3,2]
5282 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
5283 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5284 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
5285 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm3[2,1],ymm8[3,3],ymm3[6,5],ymm8[7,7]
5286 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5287 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5288 ; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm8[0],xmm7[1],xmm8[2,3]
5289 ; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
5290 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,0],ymm0[2,0],ymm15[5,4],ymm0[6,4]
5291 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5292 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm10[0,1,2],xmm6[3]
5293 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
5294 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
5295 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm9[0,0],ymm12[1,0],ymm9[4,4],ymm12[5,4]
5296 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
5297 ; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
5298 ; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm15[0,1],xmm6[3,2]
5299 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
5300 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5301 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5302 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm11[3,3],ymm0[6,5],ymm11[7,7]
5303 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5304 ; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm15 # 16-byte Folded Reload
5305 ; AVX-NEXT: # xmm15 = mem[0],xmm1[1],mem[2,3]
5306 ; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
5307 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,0],ymm0[2,0],ymm15[5,4],ymm0[6,4]
5308 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
5309 ; AVX-NEXT: # xmm2 = mem[0,1,2],xmm2[3]
5310 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
5311 ; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
5312 ; AVX-NEXT: # ymm15 = ymm1[0,0],mem[1,0],ymm1[4,4],mem[5,4]
5313 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
5314 ; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
5315 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[3,2]
5316 ; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3],ymm0[4,5,6,7]
5317 ; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
5318 ; AVX-NEXT: # ymm0 = ymm5[2,1],mem[3,3],ymm5[6,5],mem[7,7]
5319 ; AVX-NEXT: vmovaps %ymm5, %ymm14
5320 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
5321 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5322 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0],xmm1[1],xmm4[2,3]
5323 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
5324 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,0],ymm0[2,0],ymm2[5,4],ymm0[6,4]
5325 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5326 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm6[0,1,2],xmm13[3]
5327 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
5328 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
5329 ; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm5[0,0],ymm13[1,0],ymm5[4,4],ymm13[5,4]
5330 ; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,0,2,3,6,4,6,7]
5331 ; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
5332 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm11[0,1],xmm2[3,2]
5333 ; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5,6,7]
5334 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
5335 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm3[3,0],ymm0[0,0],ymm3[7,4],ymm0[4,4]
5336 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm7[2,3,2,3]
5337 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3]
5338 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
5339 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
5340 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5341 ; AVX-NEXT: # xmm2 = mem[0,1,0,1]
5342 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm10[3]
5343 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm9[1,0],ymm12[2,0],ymm9[5,4],ymm12[6,4]
5344 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
5345 ; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
5346 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
5347 ; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm0[4,5,6,7]
5348 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm14[2,3,0,1]
5349 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm14[3,0],ymm0[0,0],ymm14[7,4],ymm0[4,4]
5350 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm1[2,3,2,3]
5351 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3]
5352 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
5353 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
5354 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5355 ; AVX-NEXT: # xmm2 = mem[0,1,0,1]
5356 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm6[3]
5357 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm5[1,0],ymm13[2,0],ymm5[5,4],ymm13[6,4]
5358 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
5359 ; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
5360 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
5361 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
5362 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
5363 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
5364 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[3,0],ymm1[0,0],ymm2[7,4],ymm1[4,4]
5365 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5366 ; AVX-NEXT: # xmm2 = mem[2,3,2,3]
5367 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
5368 ; AVX-NEXT: # xmm2 = xmm2[0],mem[1],xmm2[2,3]
5369 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
5370 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
5371 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5372 ; AVX-NEXT: # xmm2 = mem[0,1,0,1]
5373 ; AVX-NEXT: vblendps $8, (%rsp), %xmm2, %xmm2 # 16-byte Folded Reload
5374 ; AVX-NEXT: # xmm2 = xmm2[0,1,2],mem[3]
5375 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5376 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
5377 ; AVX-NEXT: # ymm4 = ymm4[1,0],mem[2,0],ymm4[5,4],mem[6,4]
5378 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
5379 ; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
5380 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
5381 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
5382 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5383 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm4[2,3,0,1]
5384 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,0],ymm2[0,0],ymm4[7,4],ymm2[4,4]
5385 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5386 ; AVX-NEXT: # xmm4 = mem[2,3,2,3]
5387 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
5388 ; AVX-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
5389 ; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
5390 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,0],ymm4[4,5],ymm2[6,4]
5391 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5392 ; AVX-NEXT: # xmm4 = mem[0,1,0,1]
5393 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
5394 ; AVX-NEXT: # xmm4 = xmm4[0,1,2],mem[3]
5395 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
5396 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
5397 ; AVX-NEXT: # ymm5 = ymm5[1,0],mem[2,0],ymm5[5,4],mem[6,4]
5398 ; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
5399 ; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
5400 ; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
5401 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
5402 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5403 ; AVX-NEXT: vmovaps %ymm4, 96(%rsi)
5404 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5405 ; AVX-NEXT: vmovaps %ymm4, 32(%rsi)
5406 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5407 ; AVX-NEXT: vmovaps %ymm4, 64(%rsi)
5408 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5409 ; AVX-NEXT: vmovaps %ymm4, (%rsi)
5410 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5411 ; AVX-NEXT: vmovaps %ymm4, 96(%rdx)
5412 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5413 ; AVX-NEXT: vmovaps %ymm4, 32(%rdx)
5414 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5415 ; AVX-NEXT: vmovaps %ymm4, 64(%rdx)
5416 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5417 ; AVX-NEXT: vmovaps %ymm4, (%rdx)
5418 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5419 ; AVX-NEXT: vmovaps %ymm4, 32(%rcx)
5420 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5421 ; AVX-NEXT: vmovaps %ymm4, 96(%rcx)
5422 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5423 ; AVX-NEXT: vmovaps %ymm4, 64(%rcx)
5424 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5425 ; AVX-NEXT: vmovaps %ymm4, (%rcx)
5426 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5427 ; AVX-NEXT: vmovaps %ymm4, 96(%r8)
5428 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5429 ; AVX-NEXT: vmovaps %ymm4, 32(%r8)
5430 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5431 ; AVX-NEXT: vmovaps %ymm4, 64(%r8)
5432 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5433 ; AVX-NEXT: vmovaps %ymm4, (%r8)
5434 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5435 ; AVX-NEXT: vmovaps %ymm4, 96(%r9)
5436 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5437 ; AVX-NEXT: vmovaps %ymm4, 32(%r9)
5438 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5439 ; AVX-NEXT: vmovaps %ymm4, (%r9)
5440 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5441 ; AVX-NEXT: vmovaps %ymm4, 64(%r9)
5442 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
5443 ; AVX-NEXT: vmovaps %ymm11, 96(%rax)
5444 ; AVX-NEXT: vmovaps %ymm15, 32(%rax)
5445 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5446 ; AVX-NEXT: vmovaps %ymm4, 64(%rax)
5447 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5448 ; AVX-NEXT: vmovaps %ymm4, (%rax)
5449 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
5450 ; AVX-NEXT: vmovaps %ymm2, 32(%rax)
5451 ; AVX-NEXT: vmovaps %ymm1, (%rax)
5452 ; AVX-NEXT: vmovaps %ymm0, 96(%rax)
5453 ; AVX-NEXT: vmovaps %ymm3, 64(%rax)
5454 ; AVX-NEXT: addq $1464, %rsp # imm = 0x5B8
5455 ; AVX-NEXT: vzeroupper
5456 ; AVX-NEXT: retq
5457 ;
5458 ; AVX2-LABEL: load_i32_stride7_vf32:
5459 ; AVX2: # %bb.0:
5460 ; AVX2-NEXT: subq $1192, %rsp # imm = 0x4A8
5461 ; AVX2-NEXT: vmovdqa 320(%rdi), %ymm9
5462 ; AVX2-NEXT: vmovdqa 256(%rdi), %ymm4
5463 ; AVX2-NEXT: vmovdqa 224(%rdi), %ymm5
5464 ; AVX2-NEXT: vmovdqa 544(%rdi), %ymm12
5465 ; AVX2-NEXT: vmovdqa 480(%rdi), %ymm7
5466 ; AVX2-NEXT: vmovdqa 448(%rdi), %ymm8
5467 ; AVX2-NEXT: vmovdqa (%rdi), %ymm14
5468 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm13
5469 ; AVX2-NEXT: vmovdqa 96(%rdi), %ymm11
5470 ; AVX2-NEXT: vpbroadcastq 80(%rdi), %ymm0
5471 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm11[4,5,6,7]
5472 ; AVX2-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,7,6,0]
5473 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3,4,5],ymm13[6],ymm14[7]
5474 ; AVX2-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5475 ; AVX2-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5476 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
5477 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
5478 ; AVX2-NEXT: vmovdqa 128(%rdi), %xmm2
5479 ; AVX2-NEXT: vmovdqa 160(%rdi), %xmm3
5480 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5481 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
5482 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
5483 ; AVX2-NEXT: vpbroadcastd 196(%rdi), %ymm3
5484 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
5485 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
5486 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5487 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm7[6],ymm8[7]
5488 ; AVX2-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5489 ; AVX2-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5490 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm1
5491 ; AVX2-NEXT: vpbroadcastq 528(%rdi), %ymm2
5492 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm12[4,5,6,7]
5493 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
5494 ; AVX2-NEXT: vmovdqa 576(%rdi), %xmm2
5495 ; AVX2-NEXT: vmovdqa 608(%rdi), %xmm3
5496 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5497 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
5498 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
5499 ; AVX2-NEXT: vpbroadcastd 644(%rdi), %ymm3
5500 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
5501 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
5502 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5503 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm4[6],ymm5[7]
5504 ; AVX2-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5505 ; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5506 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm1
5507 ; AVX2-NEXT: vpbroadcastq 304(%rdi), %ymm2
5508 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
5509 ; AVX2-NEXT: vmovdqa %ymm9, %ymm10
5510 ; AVX2-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5511 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
5512 ; AVX2-NEXT: vmovdqa 352(%rdi), %xmm2
5513 ; AVX2-NEXT: vmovdqa 384(%rdi), %xmm3
5514 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5515 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
5516 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
5517 ; AVX2-NEXT: vpbroadcastd 420(%rdi), %ymm3
5518 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
5519 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
5520 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5521 ; AVX2-NEXT: vmovdqa 704(%rdi), %ymm2
5522 ; AVX2-NEXT: vmovdqa 672(%rdi), %ymm6
5523 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm2[6],ymm6[7]
5524 ; AVX2-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5525 ; AVX2-NEXT: vmovdqa %ymm2, %ymm3
5526 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5527 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm0
5528 ; AVX2-NEXT: vmovdqa 768(%rdi), %ymm15
5529 ; AVX2-NEXT: vpbroadcastq 752(%rdi), %ymm1
5530 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
5531 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
5532 ; AVX2-NEXT: vmovdqa 800(%rdi), %xmm1
5533 ; AVX2-NEXT: vmovdqa 832(%rdi), %xmm2
5534 ; AVX2-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5535 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
5536 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
5537 ; AVX2-NEXT: vpbroadcastd 868(%rdi), %ymm2
5538 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
5539 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
5540 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5541 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,2,2,2]
5542 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
5543 ; AVX2-NEXT: vmovdqa 608(%rdi), %ymm2
5544 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5545 ; AVX2-NEXT: vmovdqa 576(%rdi), %ymm1
5546 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5547 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm2[12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11],ymm2[28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27]
5548 ; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,0]
5549 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm0[7]
5550 ; AVX2-NEXT: vmovdqa 512(%rdi), %ymm9
5551 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm9[2,3],ymm12[4,5],ymm9[6,7]
5552 ; AVX2-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5553 ; AVX2-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5554 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0],ymm8[1],ymm7[2,3,4],ymm8[5],ymm7[6,7]
5555 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5556 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
5557 ; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,0,7,6,5,6,5,6]
5558 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
5559 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
5560 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5561 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
5562 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
5563 ; AVX2-NEXT: vmovdqa 384(%rdi), %ymm7
5564 ; AVX2-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5565 ; AVX2-NEXT: vmovdqa 352(%rdi), %ymm2
5566 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5567 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm7[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm7[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
5568 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
5569 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
5570 ; AVX2-NEXT: vmovdqa 288(%rdi), %ymm2
5571 ; AVX2-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
5572 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0,1],ymm2[2,3],ymm10[4,5],ymm2[6,7]
5573 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4],ymm5[5],ymm4[6,7]
5574 ; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5575 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6],ymm4[7]
5576 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
5577 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
5578 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5579 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
5580 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
5581 ; AVX2-NEXT: vmovdqa 832(%rdi), %ymm8
5582 ; AVX2-NEXT: vmovdqa 800(%rdi), %ymm10
5583 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm8[12,13,14,15],ymm10[0,1,2,3,4,5,6,7,8,9,10,11],ymm8[28,29,30,31],ymm10[16,17,18,19,20,21,22,23,24,25,26,27]
5584 ; AVX2-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5585 ; AVX2-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5586 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
5587 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
5588 ; AVX2-NEXT: vmovdqa 736(%rdi), %ymm7
5589 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1],ymm7[2,3],ymm15[4,5],ymm7[6,7]
5590 ; AVX2-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5591 ; AVX2-NEXT: vmovdqa %ymm15, %ymm5
5592 ; AVX2-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5593 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0],ymm6[1],ymm3[2,3,4],ymm6[5],ymm3[6,7]
5594 ; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5595 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6],ymm4[7]
5596 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
5597 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
5598 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5599 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
5600 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
5601 ; AVX2-NEXT: vmovdqa 160(%rdi), %ymm6
5602 ; AVX2-NEXT: vmovdqa 128(%rdi), %ymm15
5603 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm6[12,13,14,15],ymm15[0,1,2,3,4,5,6,7,8,9,10,11],ymm6[28,29,30,31],ymm15[16,17,18,19,20,21,22,23,24,25,26,27]
5604 ; AVX2-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5605 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
5606 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm1[7]
5607 ; AVX2-NEXT: vmovdqa 64(%rdi), %ymm1
5608 ; AVX2-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5609 ; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm1[2,3],ymm11[4,5],ymm1[6,7]
5610 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
5611 ; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5612 ; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5,6],ymm4[7]
5613 ; AVX2-NEXT: vpermd %ymm3, %ymm0, %ymm0
5614 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
5615 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5616 ; AVX2-NEXT: vmovdqa 80(%rdi), %xmm0
5617 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm11[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
5618 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6,7]
5619 ; AVX2-NEXT: vpbroadcastd 8(%rdi), %xmm2
5620 ; AVX2-NEXT: vmovdqa 32(%rdi), %xmm3
5621 ; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
5622 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
5623 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm15[0],ymm6[0],ymm15[2],ymm6[2]
5624 ; AVX2-NEXT: vmovdqa %ymm6, %ymm11
5625 ; AVX2-NEXT: vpbroadcastd 204(%rdi), %ymm4
5626 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
5627 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
5628 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5629 ; AVX2-NEXT: vmovdqa 528(%rdi), %xmm0
5630 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm12[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
5631 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6,7]
5632 ; AVX2-NEXT: vpbroadcastd 456(%rdi), %xmm4
5633 ; AVX2-NEXT: vmovdqa 480(%rdi), %xmm2
5634 ; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2,3]
5635 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5,6,7]
5636 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
5637 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
5638 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm9[0],ymm12[2],ymm9[2]
5639 ; AVX2-NEXT: vpbroadcastd 652(%rdi), %ymm15
5640 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm15[7]
5641 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
5642 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5643 ; AVX2-NEXT: vmovdqa 752(%rdi), %xmm0
5644 ; AVX2-NEXT: vpalignr {{.*#+}} ymm4 = ymm5[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
5645 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3],ymm4[4,5,6,7]
5646 ; AVX2-NEXT: vpbroadcastd 680(%rdi), %xmm15
5647 ; AVX2-NEXT: vmovdqa 704(%rdi), %xmm7
5648 ; AVX2-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm7[1],xmm15[2,3]
5649 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3,4,5,6,7]
5650 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm10[0],ymm8[0],ymm10[2],ymm8[2]
5651 ; AVX2-NEXT: vpbroadcastd 876(%rdi), %ymm13
5652 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5,6],ymm13[7]
5653 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
5654 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5655 ; AVX2-NEXT: vmovdqa 304(%rdi), %xmm0
5656 ; AVX2-NEXT: vmovdqu (%rsp), %ymm5 # 32-byte Reload
5657 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5658 ; AVX2-NEXT: vpalignr {{.*#+}} ymm13 = ymm4[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
5659 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2],ymm0[3],ymm13[4,5,6,7]
5660 ; AVX2-NEXT: vpbroadcastd 232(%rdi), %xmm15
5661 ; AVX2-NEXT: vmovdqa 256(%rdi), %xmm0
5662 ; AVX2-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
5663 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3,4,5,6,7]
5664 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5665 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
5666 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
5667 ; AVX2-NEXT: vpbroadcastd 428(%rdi), %ymm14
5668 ; AVX2-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5,6],ymm14[7]
5669 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4],ymm14[5,6,7]
5670 ; AVX2-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5671 ; AVX2-NEXT: vpblendd $253, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
5672 ; AVX2-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5,6,7]
5673 ; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],mem[3]
5674 ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,2,2,3]
5675 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,1,1,0,7,5,5,4]
5676 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
5677 ; AVX2-NEXT: vmovdqa %ymm11, %ymm13
5678 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
5679 ; AVX2-NEXT: vshufps {{.*#+}} ymm3 = ymm10[0,2],ymm11[1,3],ymm10[4,6],ymm11[5,7]
5680 ; AVX2-NEXT: vbroadcastss 208(%rdi), %ymm11
5681 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm11[7]
5682 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
5683 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5684 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
5685 ; AVX2-NEXT: vpblendd $253, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
5686 ; AVX2-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5,6,7]
5687 ; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],mem[3]
5688 ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,2,2,3]
5689 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,1,1,0,7,5,5,4]
5690 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
5691 ; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,2],ymm9[1,3],ymm12[4,6],ymm9[5,7]
5692 ; AVX2-NEXT: vbroadcastss 656(%rdi), %ymm3
5693 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
5694 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
5695 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5696 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0],ymm5[1],ymm4[2,3,4,5,6,7]
5697 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],mem[3]
5698 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
5699 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,1,1,0,7,5,5,4]
5700 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
5701 ; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,2],ymm8[1,3],ymm6[4,6],ymm8[5,7]
5702 ; AVX2-NEXT: vmovaps %ymm6, %ymm15
5703 ; AVX2-NEXT: vbroadcastss 432(%rdi), %ymm2
5704 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
5705 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
5706 ; AVX2-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
5707 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5708 ; AVX2-NEXT: vpblendd $253, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
5709 ; AVX2-NEXT: # ymm0 = mem[0],ymm0[1],mem[2,3,4,5,6,7]
5710 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm7[0,1,2],mem[3]
5711 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
5712 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
5713 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
5714 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
5715 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5716 ; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm14[0,2],ymm6[1,3],ymm14[4,6],ymm6[5,7]
5717 ; AVX2-NEXT: vbroadcastss 880(%rdi), %ymm2
5718 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
5719 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
5720 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5721 ; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = [4,3,0,0]
5722 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5723 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
5724 ; AVX2-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
5725 ; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
5726 ; AVX2-NEXT: vbroadcastss 548(%rdi), %xmm2
5727 ; AVX2-NEXT: vmovaps 512(%rdi), %xmm7
5728 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm7[0,1,2],xmm2[3]
5729 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
5730 ; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm11 = [0,7,0,7,0,7,0,7]
5731 ; AVX2-NEXT: vpermps %ymm12, %ymm11, %ymm2
5732 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm9[6,7]
5733 ; AVX2-NEXT: vmovaps %ymm9, %ymm12
5734 ; AVX2-NEXT: vbroadcastss 660(%rdi), %ymm3
5735 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
5736 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
5737 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5738 ; AVX2-NEXT: vbroadcastss 100(%rdi), %xmm2
5739 ; AVX2-NEXT: vmovaps 64(%rdi), %xmm0
5740 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3]
5741 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
5742 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
5743 ; AVX2-NEXT: # ymm3 = mem[0,1,2,3],ymm3[4,5,6,7]
5744 ; AVX2-NEXT: vpermps %ymm3, %ymm1, %ymm3
5745 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
5746 ; AVX2-NEXT: vpermps %ymm10, %ymm11, %ymm3
5747 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm13[6,7]
5748 ; AVX2-NEXT: vbroadcastss 212(%rdi), %ymm4
5749 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
5750 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
5751 ; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5752 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
5753 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
5754 ; AVX2-NEXT: # ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
5755 ; AVX2-NEXT: vpermps %ymm2, %ymm1, %ymm3
5756 ; AVX2-NEXT: vbroadcastss 324(%rdi), %xmm4
5757 ; AVX2-NEXT: vmovaps 288(%rdi), %xmm2
5758 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm2[0,1,2],xmm4[3]
5759 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
5760 ; AVX2-NEXT: vpermps %ymm15, %ymm11, %ymm4
5761 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm8[6,7]
5762 ; AVX2-NEXT: vmovaps %ymm8, %ymm9
5763 ; AVX2-NEXT: vbroadcastss 436(%rdi), %ymm8
5764 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm8[7]
5765 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
5766 ; AVX2-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5767 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
5768 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
5769 ; AVX2-NEXT: # ymm3 = mem[0,1,2,3],ymm3[4,5,6,7]
5770 ; AVX2-NEXT: vpermps %ymm3, %ymm1, %ymm1
5771 ; AVX2-NEXT: vbroadcastss 772(%rdi), %xmm4
5772 ; AVX2-NEXT: vmovaps 736(%rdi), %xmm3
5773 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm3[0,1,2],xmm4[3]
5774 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3]
5775 ; AVX2-NEXT: vpermps %ymm14, %ymm11, %ymm4
5776 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
5777 ; AVX2-NEXT: vbroadcastss 884(%rdi), %ymm8
5778 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm8[7]
5779 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
5780 ; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5781 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0],ymm10[1],ymm13[2,3,4],ymm10[5],ymm13[6,7]
5782 ; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,0,3,3,5,4,7,7]
5783 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
5784 ; AVX2-NEXT: vbroadcastss 216(%rdi), %ymm4
5785 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7]
5786 ; AVX2-NEXT: vmovaps 96(%rdi), %xmm10
5787 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm10[0,1,2],xmm0[3]
5788 ; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,3,2]
5789 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
5790 ; AVX2-NEXT: # ymm4 = mem[1,0,2,3,5,4,6,7]
5791 ; AVX2-NEXT: vextractf128 $1, %ymm4, %xmm4
5792 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3]
5793 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
5794 ; AVX2-NEXT: vmovaps 544(%rdi), %xmm4
5795 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm7[3]
5796 ; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,3,2]
5797 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
5798 ; AVX2-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
5799 ; AVX2-NEXT: vextractf128 $1, %ymm5, %xmm5
5800 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
5801 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm5 # 32-byte Folded Reload
5802 ; AVX2-NEXT: # ymm5 = ymm12[0],mem[1],ymm12[2,3,4],mem[5],ymm12[6,7]
5803 ; AVX2-NEXT: vshufps {{.*#+}} ymm5 = ymm5[1,0,3,3,5,4,7,7]
5804 ; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
5805 ; AVX2-NEXT: vbroadcastss 664(%rdi), %ymm7
5806 ; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm7[7]
5807 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
5808 ; AVX2-NEXT: vmovaps 320(%rdi), %xmm12
5809 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm12[0,1,2],xmm2[3]
5810 ; AVX2-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,3,2]
5811 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
5812 ; AVX2-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
5813 ; AVX2-NEXT: vextractf128 $1, %ymm5, %xmm5
5814 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
5815 ; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm9[0],ymm15[1],ymm9[2,3,4],ymm15[5],ymm9[6,7]
5816 ; AVX2-NEXT: vshufps {{.*#+}} ymm5 = ymm5[1,0,3,3,5,4,7,7]
5817 ; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
5818 ; AVX2-NEXT: vbroadcastss 440(%rdi), %ymm7
5819 ; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm7[7]
5820 ; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm2[0,1,2,3],ymm5[4,5,6,7]
5821 ; AVX2-NEXT: vmovaps 768(%rdi), %xmm2
5822 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm2[0,1,2],xmm3[3]
5823 ; AVX2-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,3,2]
5824 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
5825 ; AVX2-NEXT: # ymm7 = mem[1,0,2,3,5,4,6,7]
5826 ; AVX2-NEXT: vextractf128 $1, %ymm7, %xmm7
5827 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm7[0,1],xmm3[2,3]
5828 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm6[0],ymm14[1],ymm6[2,3,4],ymm14[5],ymm6[6,7]
5829 ; AVX2-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,3,3,5,4,7,7]
5830 ; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
5831 ; AVX2-NEXT: vbroadcastss 888(%rdi), %ymm8
5832 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
5833 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm7[4,5,6,7]
5834 ; AVX2-NEXT: vbroadcastss 584(%rdi), %xmm3
5835 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
5836 ; AVX2-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
5837 ; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
5838 ; AVX2-NEXT: vpermps 640(%rdi), %ymm11, %ymm8
5839 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm8[6,7]
5840 ; AVX2-NEXT: vbroadcastss 528(%rdi), %ymm8
5841 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm8[0,1,2],xmm4[3]
5842 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
5843 ; AVX2-NEXT: # ymm8 = mem[2,3,2,3,6,7,6,7]
5844 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
5845 ; AVX2-NEXT: # ymm8 = ymm8[0],mem[1],ymm8[2,3,4],mem[5],ymm8[6,7]
5846 ; AVX2-NEXT: vextractf128 $1, %ymm8, %xmm8
5847 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm8[0,1],xmm4[2,3]
5848 ; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm3[4,5,6,7]
5849 ; AVX2-NEXT: vbroadcastss 808(%rdi), %xmm3
5850 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
5851 ; AVX2-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
5852 ; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
5853 ; AVX2-NEXT: vpermps 864(%rdi), %ymm11, %ymm4
5854 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
5855 ; AVX2-NEXT: vbroadcastss 752(%rdi), %ymm4
5856 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[3]
5857 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
5858 ; AVX2-NEXT: # ymm4 = mem[2,3,2,3,6,7,6,7]
5859 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
5860 ; AVX2-NEXT: # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
5861 ; AVX2-NEXT: vextractf128 $1, %ymm4, %xmm4
5862 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
5863 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
5864 ; AVX2-NEXT: vbroadcastss 136(%rdi), %xmm3
5865 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
5866 ; AVX2-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
5867 ; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
5868 ; AVX2-NEXT: vpermps 192(%rdi), %ymm11, %ymm4
5869 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
5870 ; AVX2-NEXT: vbroadcastss 80(%rdi), %ymm4
5871 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1,2],xmm10[3]
5872 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
5873 ; AVX2-NEXT: # ymm6 = mem[2,3,2,3,6,7,6,7]
5874 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
5875 ; AVX2-NEXT: # ymm6 = ymm6[0],mem[1],ymm6[2,3,4],mem[5],ymm6[6,7]
5876 ; AVX2-NEXT: vextractf128 $1, %ymm6, %xmm6
5877 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
5878 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
5879 ; AVX2-NEXT: vbroadcastss 360(%rdi), %xmm4
5880 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
5881 ; AVX2-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
5882 ; AVX2-NEXT: vpermps 416(%rdi), %ymm11, %ymm6
5883 ; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
5884 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
5885 ; AVX2-NEXT: vbroadcastss 304(%rdi), %ymm6
5886 ; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm12[3]
5887 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
5888 ; AVX2-NEXT: # ymm10 = mem[2,3,2,3,6,7,6,7]
5889 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
5890 ; AVX2-NEXT: # ymm10 = ymm10[0],mem[1],ymm10[2,3,4],mem[5],ymm10[6,7]
5891 ; AVX2-NEXT: vextractf128 $1, %ymm10, %xmm10
5892 ; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm10[0,1],xmm6[2,3]
5893 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
5894 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5895 ; AVX2-NEXT: vmovaps %ymm6, 96(%rsi)
5896 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5897 ; AVX2-NEXT: vmovaps %ymm6, 32(%rsi)
5898 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5899 ; AVX2-NEXT: vmovaps %ymm6, 64(%rsi)
5900 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5901 ; AVX2-NEXT: vmovaps %ymm6, (%rsi)
5902 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5903 ; AVX2-NEXT: vmovaps %ymm6, 96(%rdx)
5904 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5905 ; AVX2-NEXT: vmovaps %ymm6, 32(%rdx)
5906 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5907 ; AVX2-NEXT: vmovaps %ymm6, 64(%rdx)
5908 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5909 ; AVX2-NEXT: vmovaps %ymm6, (%rdx)
5910 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5911 ; AVX2-NEXT: vmovaps %ymm6, 32(%rcx)
5912 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5913 ; AVX2-NEXT: vmovaps %ymm6, 96(%rcx)
5914 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5915 ; AVX2-NEXT: vmovaps %ymm6, 64(%rcx)
5916 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5917 ; AVX2-NEXT: vmovaps %ymm6, (%rcx)
5918 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5919 ; AVX2-NEXT: vmovaps %ymm6, 96(%r8)
5920 ; AVX2-NEXT: vmovups (%rsp), %ymm6 # 32-byte Reload
5921 ; AVX2-NEXT: vmovaps %ymm6, 32(%r8)
5922 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5923 ; AVX2-NEXT: vmovaps %ymm6, 64(%r8)
5924 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5925 ; AVX2-NEXT: vmovaps %ymm6, (%r8)
5926 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5927 ; AVX2-NEXT: vmovaps %ymm6, 96(%r9)
5928 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5929 ; AVX2-NEXT: vmovaps %ymm6, 32(%r9)
5930 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5931 ; AVX2-NEXT: vmovaps %ymm6, (%r9)
5932 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5933 ; AVX2-NEXT: vmovaps %ymm6, 64(%r9)
5934 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
5935 ; AVX2-NEXT: vmovaps %ymm7, 96(%rax)
5936 ; AVX2-NEXT: vmovaps %ymm5, 32(%rax)
5937 ; AVX2-NEXT: vmovaps %ymm1, 64(%rax)
5938 ; AVX2-NEXT: vmovaps %ymm0, (%rax)
5939 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
5940 ; AVX2-NEXT: vmovaps %ymm4, 32(%rax)
5941 ; AVX2-NEXT: vmovaps %ymm3, (%rax)
5942 ; AVX2-NEXT: vmovaps %ymm2, 96(%rax)
5943 ; AVX2-NEXT: vmovaps %ymm8, 64(%rax)
5944 ; AVX2-NEXT: addq $1192, %rsp # imm = 0x4A8
5945 ; AVX2-NEXT: vzeroupper
5946 ; AVX2-NEXT: retq
5947 ;
5948 ; AVX2-FP-LABEL: load_i32_stride7_vf32:
5949 ; AVX2-FP: # %bb.0:
5950 ; AVX2-FP-NEXT: subq $1192, %rsp # imm = 0x4A8
5951 ; AVX2-FP-NEXT: vmovdqa 320(%rdi), %ymm9
5952 ; AVX2-FP-NEXT: vmovdqa 256(%rdi), %ymm4
5953 ; AVX2-FP-NEXT: vmovdqa 224(%rdi), %ymm5
5954 ; AVX2-FP-NEXT: vmovdqa 544(%rdi), %ymm12
5955 ; AVX2-FP-NEXT: vmovdqa 480(%rdi), %ymm7
5956 ; AVX2-FP-NEXT: vmovdqa 448(%rdi), %ymm8
5957 ; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm14
5958 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm13
5959 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm11
5960 ; AVX2-FP-NEXT: vpbroadcastq 80(%rdi), %ymm0
5961 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm11[4,5,6,7]
5962 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,7,6,0]
5963 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3,4,5],ymm13[6],ymm14[7]
5964 ; AVX2-FP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5965 ; AVX2-FP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5966 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
5967 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
5968 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %xmm2
5969 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %xmm3
5970 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5971 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
5972 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
5973 ; AVX2-FP-NEXT: vpbroadcastd 196(%rdi), %ymm3
5974 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
5975 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
5976 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5977 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm7[6],ymm8[7]
5978 ; AVX2-FP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5979 ; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5980 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm1
5981 ; AVX2-FP-NEXT: vpbroadcastq 528(%rdi), %ymm2
5982 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm12[4,5,6,7]
5983 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
5984 ; AVX2-FP-NEXT: vmovdqa 576(%rdi), %xmm2
5985 ; AVX2-FP-NEXT: vmovdqa 608(%rdi), %xmm3
5986 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5987 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
5988 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
5989 ; AVX2-FP-NEXT: vpbroadcastd 644(%rdi), %ymm3
5990 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
5991 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
5992 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5993 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm4[6],ymm5[7]
5994 ; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5995 ; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5996 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm1
5997 ; AVX2-FP-NEXT: vpbroadcastq 304(%rdi), %ymm2
5998 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
5999 ; AVX2-FP-NEXT: vmovdqa %ymm9, %ymm10
6000 ; AVX2-FP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6001 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
6002 ; AVX2-FP-NEXT: vmovdqa 352(%rdi), %xmm2
6003 ; AVX2-FP-NEXT: vmovdqa 384(%rdi), %xmm3
6004 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6005 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
6006 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
6007 ; AVX2-FP-NEXT: vpbroadcastd 420(%rdi), %ymm3
6008 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
6009 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
6010 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6011 ; AVX2-FP-NEXT: vmovdqa 704(%rdi), %ymm2
6012 ; AVX2-FP-NEXT: vmovdqa 672(%rdi), %ymm6
6013 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm2[6],ymm6[7]
6014 ; AVX2-FP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6015 ; AVX2-FP-NEXT: vmovdqa %ymm2, %ymm3
6016 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6017 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm0
6018 ; AVX2-FP-NEXT: vmovdqa 768(%rdi), %ymm15
6019 ; AVX2-FP-NEXT: vpbroadcastq 752(%rdi), %ymm1
6020 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
6021 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
6022 ; AVX2-FP-NEXT: vmovdqa 800(%rdi), %xmm1
6023 ; AVX2-FP-NEXT: vmovdqa 832(%rdi), %xmm2
6024 ; AVX2-FP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6025 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
6026 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6027 ; AVX2-FP-NEXT: vpbroadcastd 868(%rdi), %ymm2
6028 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
6029 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6030 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6031 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,2,2,2]
6032 ; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6033 ; AVX2-FP-NEXT: vmovdqa 608(%rdi), %ymm2
6034 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6035 ; AVX2-FP-NEXT: vmovdqa 576(%rdi), %ymm1
6036 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6037 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm2[12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11],ymm2[28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27]
6038 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,0]
6039 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm0[7]
6040 ; AVX2-FP-NEXT: vmovdqa 512(%rdi), %ymm9
6041 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm9[2,3],ymm12[4,5],ymm9[6,7]
6042 ; AVX2-FP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6043 ; AVX2-FP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6044 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0],ymm8[1],ymm7[2,3,4],ymm8[5],ymm7[6,7]
6045 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6046 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
6047 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,0,7,6,5,6,5,6]
6048 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
6049 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
6050 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6051 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
6052 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6053 ; AVX2-FP-NEXT: vmovdqa 384(%rdi), %ymm7
6054 ; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6055 ; AVX2-FP-NEXT: vmovdqa 352(%rdi), %ymm2
6056 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6057 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm7[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm7[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
6058 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
6059 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
6060 ; AVX2-FP-NEXT: vmovdqa 288(%rdi), %ymm2
6061 ; AVX2-FP-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
6062 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0,1],ymm2[2,3],ymm10[4,5],ymm2[6,7]
6063 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4],ymm5[5],ymm4[6,7]
6064 ; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6065 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6],ymm4[7]
6066 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
6067 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
6068 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6069 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
6070 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6071 ; AVX2-FP-NEXT: vmovdqa 832(%rdi), %ymm8
6072 ; AVX2-FP-NEXT: vmovdqa 800(%rdi), %ymm10
6073 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm8[12,13,14,15],ymm10[0,1,2,3,4,5,6,7,8,9,10,11],ymm8[28,29,30,31],ymm10[16,17,18,19,20,21,22,23,24,25,26,27]
6074 ; AVX2-FP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6075 ; AVX2-FP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6076 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
6077 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
6078 ; AVX2-FP-NEXT: vmovdqa 736(%rdi), %ymm7
6079 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1],ymm7[2,3],ymm15[4,5],ymm7[6,7]
6080 ; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6081 ; AVX2-FP-NEXT: vmovdqa %ymm15, %ymm5
6082 ; AVX2-FP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6083 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0],ymm6[1],ymm3[2,3,4],ymm6[5],ymm3[6,7]
6084 ; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6085 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6],ymm4[7]
6086 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
6087 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
6088 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6089 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
6090 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6091 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm6
6092 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm15
6093 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm6[12,13,14,15],ymm15[0,1,2,3,4,5,6,7,8,9,10,11],ymm6[28,29,30,31],ymm15[16,17,18,19,20,21,22,23,24,25,26,27]
6094 ; AVX2-FP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6095 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
6096 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm1[7]
6097 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm1
6098 ; AVX2-FP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6099 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm1[2,3],ymm11[4,5],ymm1[6,7]
6100 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
6101 ; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6102 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5,6],ymm4[7]
6103 ; AVX2-FP-NEXT: vpermd %ymm3, %ymm0, %ymm0
6104 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
6105 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6106 ; AVX2-FP-NEXT: vmovdqa 80(%rdi), %xmm0
6107 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm11[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
6108 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6,7]
6109 ; AVX2-FP-NEXT: vpbroadcastd 8(%rdi), %xmm2
6110 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm3
6111 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
6112 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
6113 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm15[0],ymm6[0],ymm15[2],ymm6[2]
6114 ; AVX2-FP-NEXT: vmovdqa %ymm6, %ymm11
6115 ; AVX2-FP-NEXT: vpbroadcastd 204(%rdi), %ymm4
6116 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
6117 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
6118 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6119 ; AVX2-FP-NEXT: vmovdqa 528(%rdi), %xmm0
6120 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm12[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
6121 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6,7]
6122 ; AVX2-FP-NEXT: vpbroadcastd 456(%rdi), %xmm4
6123 ; AVX2-FP-NEXT: vmovdqa 480(%rdi), %xmm2
6124 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2,3]
6125 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5,6,7]
6126 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
6127 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
6128 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm9[0],ymm12[2],ymm9[2]
6129 ; AVX2-FP-NEXT: vpbroadcastd 652(%rdi), %ymm15
6130 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm15[7]
6131 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
6132 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6133 ; AVX2-FP-NEXT: vmovdqa 752(%rdi), %xmm0
6134 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm4 = ymm5[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
6135 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3],ymm4[4,5,6,7]
6136 ; AVX2-FP-NEXT: vpbroadcastd 680(%rdi), %xmm15
6137 ; AVX2-FP-NEXT: vmovdqa 704(%rdi), %xmm7
6138 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm7[1],xmm15[2,3]
6139 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3,4,5,6,7]
6140 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm10[0],ymm8[0],ymm10[2],ymm8[2]
6141 ; AVX2-FP-NEXT: vpbroadcastd 876(%rdi), %ymm13
6142 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5,6],ymm13[7]
6143 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
6144 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6145 ; AVX2-FP-NEXT: vmovdqa 304(%rdi), %xmm0
6146 ; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm5 # 32-byte Reload
6147 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
6148 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm13 = ymm4[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
6149 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2],ymm0[3],ymm13[4,5,6,7]
6150 ; AVX2-FP-NEXT: vpbroadcastd 232(%rdi), %xmm15
6151 ; AVX2-FP-NEXT: vmovdqa 256(%rdi), %xmm0
6152 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
6153 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3,4,5,6,7]
6154 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6155 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
6156 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
6157 ; AVX2-FP-NEXT: vpbroadcastd 428(%rdi), %ymm14
6158 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5,6],ymm14[7]
6159 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4],ymm14[5,6,7]
6160 ; AVX2-FP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6161 ; AVX2-FP-NEXT: vpblendd $253, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
6162 ; AVX2-FP-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5,6,7]
6163 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],mem[3]
6164 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,2,2,3]
6165 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,1,1,0,7,5,5,4]
6166 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
6167 ; AVX2-FP-NEXT: vmovdqa %ymm11, %ymm13
6168 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6169 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm3 = ymm10[0,2],ymm11[1,3],ymm10[4,6],ymm11[5,7]
6170 ; AVX2-FP-NEXT: vbroadcastss 208(%rdi), %ymm11
6171 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm11[7]
6172 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
6173 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6174 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6175 ; AVX2-FP-NEXT: vpblendd $253, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
6176 ; AVX2-FP-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5,6,7]
6177 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],mem[3]
6178 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,2,2,3]
6179 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,1,1,0,7,5,5,4]
6180 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
6181 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,2],ymm9[1,3],ymm12[4,6],ymm9[5,7]
6182 ; AVX2-FP-NEXT: vbroadcastss 656(%rdi), %ymm3
6183 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
6184 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
6185 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6186 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0],ymm5[1],ymm4[2,3,4,5,6,7]
6187 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],mem[3]
6188 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
6189 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,1,1,0,7,5,5,4]
6190 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
6191 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,2],ymm8[1,3],ymm6[4,6],ymm8[5,7]
6192 ; AVX2-FP-NEXT: vmovaps %ymm6, %ymm15
6193 ; AVX2-FP-NEXT: vbroadcastss 432(%rdi), %ymm2
6194 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
6195 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6196 ; AVX2-FP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
6197 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6198 ; AVX2-FP-NEXT: vpblendd $253, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6199 ; AVX2-FP-NEXT: # ymm0 = mem[0],ymm0[1],mem[2,3,4,5,6,7]
6200 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm7[0,1,2],mem[3]
6201 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
6202 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
6203 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
6204 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
6205 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6206 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm1 = ymm14[0,2],ymm6[1,3],ymm14[4,6],ymm6[5,7]
6207 ; AVX2-FP-NEXT: vbroadcastss 880(%rdi), %ymm2
6208 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
6209 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6210 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6211 ; AVX2-FP-NEXT: vmovsd {{.*#+}} xmm1 = [4,3,0,0]
6212 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6213 ; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6214 ; AVX2-FP-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
6215 ; AVX2-FP-NEXT: vpermps %ymm0, %ymm1, %ymm0
6216 ; AVX2-FP-NEXT: vbroadcastss 548(%rdi), %xmm2
6217 ; AVX2-FP-NEXT: vmovaps 512(%rdi), %xmm7
6218 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm7[0,1,2],xmm2[3]
6219 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
6220 ; AVX2-FP-NEXT: vbroadcastsd {{.*#+}} ymm11 = [0,7,0,7,0,7,0,7]
6221 ; AVX2-FP-NEXT: vpermps %ymm12, %ymm11, %ymm2
6222 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm9[6,7]
6223 ; AVX2-FP-NEXT: vmovaps %ymm9, %ymm12
6224 ; AVX2-FP-NEXT: vbroadcastss 660(%rdi), %ymm3
6225 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
6226 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
6227 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6228 ; AVX2-FP-NEXT: vbroadcastss 100(%rdi), %xmm2
6229 ; AVX2-FP-NEXT: vmovaps 64(%rdi), %xmm0
6230 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3]
6231 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
6232 ; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
6233 ; AVX2-FP-NEXT: # ymm3 = mem[0,1,2,3],ymm3[4,5,6,7]
6234 ; AVX2-FP-NEXT: vpermps %ymm3, %ymm1, %ymm3
6235 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
6236 ; AVX2-FP-NEXT: vpermps %ymm10, %ymm11, %ymm3
6237 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm13[6,7]
6238 ; AVX2-FP-NEXT: vbroadcastss 212(%rdi), %ymm4
6239 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
6240 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
6241 ; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6242 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
6243 ; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
6244 ; AVX2-FP-NEXT: # ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
6245 ; AVX2-FP-NEXT: vpermps %ymm2, %ymm1, %ymm3
6246 ; AVX2-FP-NEXT: vbroadcastss 324(%rdi), %xmm4
6247 ; AVX2-FP-NEXT: vmovaps 288(%rdi), %xmm2
6248 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm2[0,1,2],xmm4[3]
6249 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
6250 ; AVX2-FP-NEXT: vpermps %ymm15, %ymm11, %ymm4
6251 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm8[6,7]
6252 ; AVX2-FP-NEXT: vmovaps %ymm8, %ymm9
6253 ; AVX2-FP-NEXT: vbroadcastss 436(%rdi), %ymm8
6254 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm8[7]
6255 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
6256 ; AVX2-FP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6257 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
6258 ; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
6259 ; AVX2-FP-NEXT: # ymm3 = mem[0,1,2,3],ymm3[4,5,6,7]
6260 ; AVX2-FP-NEXT: vpermps %ymm3, %ymm1, %ymm1
6261 ; AVX2-FP-NEXT: vbroadcastss 772(%rdi), %xmm4
6262 ; AVX2-FP-NEXT: vmovaps 736(%rdi), %xmm3
6263 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm3[0,1,2],xmm4[3]
6264 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3]
6265 ; AVX2-FP-NEXT: vpermps %ymm14, %ymm11, %ymm4
6266 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
6267 ; AVX2-FP-NEXT: vbroadcastss 884(%rdi), %ymm8
6268 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm8[7]
6269 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
6270 ; AVX2-FP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6271 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0],ymm10[1],ymm13[2,3,4],ymm10[5],ymm13[6,7]
6272 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,0,3,3,5,4,7,7]
6273 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
6274 ; AVX2-FP-NEXT: vbroadcastss 216(%rdi), %ymm4
6275 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7]
6276 ; AVX2-FP-NEXT: vmovaps 96(%rdi), %xmm10
6277 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm10[0,1,2],xmm0[3]
6278 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,3,2]
6279 ; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
6280 ; AVX2-FP-NEXT: # ymm4 = mem[1,0,2,3,5,4,6,7]
6281 ; AVX2-FP-NEXT: vextractf128 $1, %ymm4, %xmm4
6282 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3]
6283 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
6284 ; AVX2-FP-NEXT: vmovaps 544(%rdi), %xmm4
6285 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm7[3]
6286 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,3,2]
6287 ; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
6288 ; AVX2-FP-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
6289 ; AVX2-FP-NEXT: vextractf128 $1, %ymm5, %xmm5
6290 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
6291 ; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm5 # 32-byte Folded Reload
6292 ; AVX2-FP-NEXT: # ymm5 = ymm12[0],mem[1],ymm12[2,3,4],mem[5],ymm12[6,7]
6293 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm5 = ymm5[1,0,3,3,5,4,7,7]
6294 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
6295 ; AVX2-FP-NEXT: vbroadcastss 664(%rdi), %ymm7
6296 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm7[7]
6297 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
6298 ; AVX2-FP-NEXT: vmovaps 320(%rdi), %xmm12
6299 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm12[0,1,2],xmm2[3]
6300 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,3,2]
6301 ; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
6302 ; AVX2-FP-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
6303 ; AVX2-FP-NEXT: vextractf128 $1, %ymm5, %xmm5
6304 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
6305 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm9[0],ymm15[1],ymm9[2,3,4],ymm15[5],ymm9[6,7]
6306 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm5 = ymm5[1,0,3,3,5,4,7,7]
6307 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
6308 ; AVX2-FP-NEXT: vbroadcastss 440(%rdi), %ymm7
6309 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm7[7]
6310 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm2[0,1,2,3],ymm5[4,5,6,7]
6311 ; AVX2-FP-NEXT: vmovaps 768(%rdi), %xmm2
6312 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm2[0,1,2],xmm3[3]
6313 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,3,2]
6314 ; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
6315 ; AVX2-FP-NEXT: # ymm7 = mem[1,0,2,3,5,4,6,7]
6316 ; AVX2-FP-NEXT: vextractf128 $1, %ymm7, %xmm7
6317 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm7[0,1],xmm3[2,3]
6318 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm6[0],ymm14[1],ymm6[2,3,4],ymm14[5],ymm6[6,7]
6319 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,3,3,5,4,7,7]
6320 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
6321 ; AVX2-FP-NEXT: vbroadcastss 888(%rdi), %ymm8
6322 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
6323 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm7[4,5,6,7]
6324 ; AVX2-FP-NEXT: vbroadcastss 584(%rdi), %xmm3
6325 ; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
6326 ; AVX2-FP-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
6327 ; AVX2-FP-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
6328 ; AVX2-FP-NEXT: vpermps 640(%rdi), %ymm11, %ymm8
6329 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm8[6,7]
6330 ; AVX2-FP-NEXT: vbroadcastss 528(%rdi), %ymm8
6331 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm8[0,1,2],xmm4[3]
6332 ; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
6333 ; AVX2-FP-NEXT: # ymm8 = mem[2,3,2,3,6,7,6,7]
6334 ; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
6335 ; AVX2-FP-NEXT: # ymm8 = ymm8[0],mem[1],ymm8[2,3,4],mem[5],ymm8[6,7]
6336 ; AVX2-FP-NEXT: vextractf128 $1, %ymm8, %xmm8
6337 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm8[0,1],xmm4[2,3]
6338 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm3[4,5,6,7]
6339 ; AVX2-FP-NEXT: vbroadcastss 808(%rdi), %xmm3
6340 ; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
6341 ; AVX2-FP-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
6342 ; AVX2-FP-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
6343 ; AVX2-FP-NEXT: vpermps 864(%rdi), %ymm11, %ymm4
6344 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
6345 ; AVX2-FP-NEXT: vbroadcastss 752(%rdi), %ymm4
6346 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[3]
6347 ; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
6348 ; AVX2-FP-NEXT: # ymm4 = mem[2,3,2,3,6,7,6,7]
6349 ; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
6350 ; AVX2-FP-NEXT: # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
6351 ; AVX2-FP-NEXT: vextractf128 $1, %ymm4, %xmm4
6352 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
6353 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
6354 ; AVX2-FP-NEXT: vbroadcastss 136(%rdi), %xmm3
6355 ; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
6356 ; AVX2-FP-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
6357 ; AVX2-FP-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
6358 ; AVX2-FP-NEXT: vpermps 192(%rdi), %ymm11, %ymm4
6359 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
6360 ; AVX2-FP-NEXT: vbroadcastss 80(%rdi), %ymm4
6361 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1,2],xmm10[3]
6362 ; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
6363 ; AVX2-FP-NEXT: # ymm6 = mem[2,3,2,3,6,7,6,7]
6364 ; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
6365 ; AVX2-FP-NEXT: # ymm6 = ymm6[0],mem[1],ymm6[2,3,4],mem[5],ymm6[6,7]
6366 ; AVX2-FP-NEXT: vextractf128 $1, %ymm6, %xmm6
6367 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
6368 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
6369 ; AVX2-FP-NEXT: vbroadcastss 360(%rdi), %xmm4
6370 ; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
6371 ; AVX2-FP-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
6372 ; AVX2-FP-NEXT: vpermps 416(%rdi), %ymm11, %ymm6
6373 ; AVX2-FP-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
6374 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
6375 ; AVX2-FP-NEXT: vbroadcastss 304(%rdi), %ymm6
6376 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm12[3]
6377 ; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
6378 ; AVX2-FP-NEXT: # ymm10 = mem[2,3,2,3,6,7,6,7]
6379 ; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
6380 ; AVX2-FP-NEXT: # ymm10 = ymm10[0],mem[1],ymm10[2,3,4],mem[5],ymm10[6,7]
6381 ; AVX2-FP-NEXT: vextractf128 $1, %ymm10, %xmm10
6382 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm6 = xmm10[0,1],xmm6[2,3]
6383 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
6384 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6385 ; AVX2-FP-NEXT: vmovaps %ymm6, 96(%rsi)
6386 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6387 ; AVX2-FP-NEXT: vmovaps %ymm6, 32(%rsi)
6388 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6389 ; AVX2-FP-NEXT: vmovaps %ymm6, 64(%rsi)
6390 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6391 ; AVX2-FP-NEXT: vmovaps %ymm6, (%rsi)
6392 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6393 ; AVX2-FP-NEXT: vmovaps %ymm6, 96(%rdx)
6394 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6395 ; AVX2-FP-NEXT: vmovaps %ymm6, 32(%rdx)
6396 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6397 ; AVX2-FP-NEXT: vmovaps %ymm6, 64(%rdx)
6398 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6399 ; AVX2-FP-NEXT: vmovaps %ymm6, (%rdx)
6400 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6401 ; AVX2-FP-NEXT: vmovaps %ymm6, 32(%rcx)
6402 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6403 ; AVX2-FP-NEXT: vmovaps %ymm6, 96(%rcx)
6404 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6405 ; AVX2-FP-NEXT: vmovaps %ymm6, 64(%rcx)
6406 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6407 ; AVX2-FP-NEXT: vmovaps %ymm6, (%rcx)
6408 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6409 ; AVX2-FP-NEXT: vmovaps %ymm6, 96(%r8)
6410 ; AVX2-FP-NEXT: vmovups (%rsp), %ymm6 # 32-byte Reload
6411 ; AVX2-FP-NEXT: vmovaps %ymm6, 32(%r8)
6412 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6413 ; AVX2-FP-NEXT: vmovaps %ymm6, 64(%r8)
6414 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6415 ; AVX2-FP-NEXT: vmovaps %ymm6, (%r8)
6416 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6417 ; AVX2-FP-NEXT: vmovaps %ymm6, 96(%r9)
6418 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6419 ; AVX2-FP-NEXT: vmovaps %ymm6, 32(%r9)
6420 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6421 ; AVX2-FP-NEXT: vmovaps %ymm6, (%r9)
6422 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6423 ; AVX2-FP-NEXT: vmovaps %ymm6, 64(%r9)
6424 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
6425 ; AVX2-FP-NEXT: vmovaps %ymm7, 96(%rax)
6426 ; AVX2-FP-NEXT: vmovaps %ymm5, 32(%rax)
6427 ; AVX2-FP-NEXT: vmovaps %ymm1, 64(%rax)
6428 ; AVX2-FP-NEXT: vmovaps %ymm0, (%rax)
6429 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
6430 ; AVX2-FP-NEXT: vmovaps %ymm4, 32(%rax)
6431 ; AVX2-FP-NEXT: vmovaps %ymm3, (%rax)
6432 ; AVX2-FP-NEXT: vmovaps %ymm2, 96(%rax)
6433 ; AVX2-FP-NEXT: vmovaps %ymm8, 64(%rax)
6434 ; AVX2-FP-NEXT: addq $1192, %rsp # imm = 0x4A8
6435 ; AVX2-FP-NEXT: vzeroupper
6436 ; AVX2-FP-NEXT: retq
6437 ;
6438 ; AVX2-FCP-LABEL: load_i32_stride7_vf32:
6439 ; AVX2-FCP: # %bb.0:
6440 ; AVX2-FCP-NEXT: subq $1224, %rsp # imm = 0x4C8
6441 ; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm10
6442 ; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %ymm4
6443 ; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm5
6444 ; AVX2-FCP-NEXT: vmovdqa 544(%rdi), %ymm12
6445 ; AVX2-FCP-NEXT: vmovdqa 480(%rdi), %ymm7
6446 ; AVX2-FCP-NEXT: vmovdqa 448(%rdi), %ymm8
6447 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm14
6448 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
6449 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6450 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm1
6451 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6452 ; AVX2-FCP-NEXT: vpbroadcastq 80(%rdi), %ymm0
6453 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
6454 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,7,6,0]
6455 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3,4,5],ymm2[6],ymm14[7]
6456 ; AVX2-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6457 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
6458 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
6459 ; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm2
6460 ; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm3
6461 ; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6462 ; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
6463 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
6464 ; AVX2-FCP-NEXT: vpbroadcastd 196(%rdi), %ymm3
6465 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
6466 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
6467 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6468 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm7[6],ymm8[7]
6469 ; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6470 ; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6471 ; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm1
6472 ; AVX2-FCP-NEXT: vpbroadcastq 528(%rdi), %ymm2
6473 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm12[4,5,6,7]
6474 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
6475 ; AVX2-FCP-NEXT: vmovdqa 576(%rdi), %xmm2
6476 ; AVX2-FCP-NEXT: vmovdqa 608(%rdi), %xmm3
6477 ; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6478 ; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
6479 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
6480 ; AVX2-FCP-NEXT: vpbroadcastd 644(%rdi), %ymm3
6481 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
6482 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
6483 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6484 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm4[6],ymm5[7]
6485 ; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6486 ; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6487 ; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm1
6488 ; AVX2-FCP-NEXT: vpbroadcastq 304(%rdi), %ymm2
6489 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
6490 ; AVX2-FCP-NEXT: vmovdqu %ymm10, (%rsp) # 32-byte Spill
6491 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
6492 ; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %xmm2
6493 ; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %xmm3
6494 ; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6495 ; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
6496 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
6497 ; AVX2-FCP-NEXT: vpbroadcastd 420(%rdi), %ymm3
6498 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
6499 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
6500 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6501 ; AVX2-FCP-NEXT: vmovdqa 704(%rdi), %ymm2
6502 ; AVX2-FCP-NEXT: vmovdqa 672(%rdi), %ymm6
6503 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm2[6],ymm6[7]
6504 ; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6505 ; AVX2-FCP-NEXT: vmovdqa %ymm2, %ymm3
6506 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6507 ; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm0
6508 ; AVX2-FCP-NEXT: vmovdqa 768(%rdi), %ymm11
6509 ; AVX2-FCP-NEXT: vpbroadcastq 752(%rdi), %ymm1
6510 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
6511 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
6512 ; AVX2-FCP-NEXT: vmovdqa 800(%rdi), %xmm1
6513 ; AVX2-FCP-NEXT: vmovdqa 832(%rdi), %xmm2
6514 ; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6515 ; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
6516 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6517 ; AVX2-FCP-NEXT: vpbroadcastd 868(%rdi), %ymm2
6518 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
6519 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6520 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6521 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,2,2,2]
6522 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6523 ; AVX2-FCP-NEXT: vmovdqa 608(%rdi), %ymm2
6524 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6525 ; AVX2-FCP-NEXT: vmovdqa 576(%rdi), %ymm1
6526 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6527 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm2[12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11],ymm2[28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27]
6528 ; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,0]
6529 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm0[7]
6530 ; AVX2-FCP-NEXT: vmovdqa 512(%rdi), %ymm13
6531 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm13[2,3],ymm12[4,5],ymm13[6,7]
6532 ; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6533 ; AVX2-FCP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6534 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0],ymm8[1],ymm7[2,3,4],ymm8[5],ymm7[6,7]
6535 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6536 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
6537 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,0,7,6,5,6,5,6]
6538 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
6539 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
6540 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6541 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
6542 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6543 ; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %ymm7
6544 ; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6545 ; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm2
6546 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6547 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm7[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm7[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
6548 ; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
6549 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
6550 ; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %ymm2
6551 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6552 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0,1],ymm2[2,3],ymm10[4,5],ymm2[6,7]
6553 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4],ymm5[5],ymm4[6,7]
6554 ; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6555 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6],ymm4[7]
6556 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
6557 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
6558 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6559 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
6560 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6561 ; AVX2-FCP-NEXT: vmovdqa 832(%rdi), %ymm9
6562 ; AVX2-FCP-NEXT: vmovdqa 800(%rdi), %ymm15
6563 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm9[12,13,14,15],ymm15[0,1,2,3,4,5,6,7,8,9,10,11],ymm9[28,29,30,31],ymm15[16,17,18,19,20,21,22,23,24,25,26,27]
6564 ; AVX2-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6565 ; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6566 ; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
6567 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
6568 ; AVX2-FCP-NEXT: vmovdqa 736(%rdi), %ymm5
6569 ; AVX2-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6570 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm5[2,3],ymm11[4,5],ymm5[6,7]
6571 ; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6572 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0],ymm6[1],ymm3[2,3,4],ymm6[5],ymm3[6,7]
6573 ; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6574 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6],ymm4[7]
6575 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
6576 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
6577 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6578 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
6579 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6580 ; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm7
6581 ; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm8
6582 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm7[12,13,14,15],ymm8[0,1,2,3,4,5,6,7,8,9,10,11],ymm7[28,29,30,31],ymm8[16,17,18,19,20,21,22,23,24,25,26,27]
6583 ; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6584 ; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6585 ; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
6586 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm1[7]
6587 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm1
6588 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6589 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm1[2,3],ymm6[4,5],ymm1[6,7]
6590 ; AVX2-FCP-NEXT: vpblendd $221, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm4 # 32-byte Folded Reload
6591 ; AVX2-FCP-NEXT: # ymm4 = mem[0],ymm14[1],mem[2,3,4],ymm14[5],mem[6,7]
6592 ; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6593 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5,6],ymm4[7]
6594 ; AVX2-FCP-NEXT: vpermd %ymm3, %ymm0, %ymm0
6595 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
6596 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6597 ; AVX2-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
6598 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm6[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
6599 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6,7]
6600 ; AVX2-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm2
6601 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm3
6602 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
6603 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
6604 ; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
6605 ; AVX2-FCP-NEXT: vpbroadcastd 204(%rdi), %ymm4
6606 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
6607 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
6608 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6609 ; AVX2-FCP-NEXT: vmovdqa 528(%rdi), %xmm0
6610 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm12[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
6611 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6,7]
6612 ; AVX2-FCP-NEXT: vpbroadcastd 456(%rdi), %xmm4
6613 ; AVX2-FCP-NEXT: vmovdqa 480(%rdi), %xmm2
6614 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2,3]
6615 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5,6,7]
6616 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
6617 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
6618 ; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm4 = ymm8[0],ymm12[0],ymm8[2],ymm12[2]
6619 ; AVX2-FCP-NEXT: vpbroadcastd 652(%rdi), %ymm13
6620 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm13[7]
6621 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
6622 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6623 ; AVX2-FCP-NEXT: vmovdqa 752(%rdi), %xmm0
6624 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm4 = ymm11[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
6625 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3],ymm4[4,5,6,7]
6626 ; AVX2-FCP-NEXT: vpbroadcastd 680(%rdi), %xmm13
6627 ; AVX2-FCP-NEXT: vmovdqa 704(%rdi), %xmm10
6628 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm13 = xmm13[0],xmm10[1],xmm13[2,3]
6629 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3,4,5,6,7]
6630 ; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm13 = ymm15[0],ymm9[0],ymm15[2],ymm9[2]
6631 ; AVX2-FCP-NEXT: vpbroadcastd 876(%rdi), %ymm15
6632 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm15[7]
6633 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
6634 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6635 ; AVX2-FCP-NEXT: vmovdqa 304(%rdi), %xmm0
6636 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
6637 ; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm6 # 32-byte Reload
6638 ; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm13 = ymm6[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
6639 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2],ymm0[3],ymm13[4,5,6,7]
6640 ; AVX2-FCP-NEXT: vpbroadcastd 232(%rdi), %xmm15
6641 ; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %xmm0
6642 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
6643 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3,4,5,6,7]
6644 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
6645 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
6646 ; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
6647 ; AVX2-FCP-NEXT: vpbroadcastd 428(%rdi), %ymm14
6648 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5,6],ymm14[7]
6649 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4],ymm14[5,6,7]
6650 ; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6651 ; AVX2-FCP-NEXT: vpblendd $253, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
6652 ; AVX2-FCP-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5,6,7]
6653 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],mem[3]
6654 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,2,2,3]
6655 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,1,1,0,7,5,5,4]
6656 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
6657 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
6658 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
6659 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm3 = ymm15[0,2],ymm9[1,3],ymm15[4,6],ymm9[5,7]
6660 ; AVX2-FCP-NEXT: vbroadcastss 208(%rdi), %ymm11
6661 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm11[7]
6662 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
6663 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6664 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6665 ; AVX2-FCP-NEXT: vpblendd $253, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
6666 ; AVX2-FCP-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5,6,7]
6667 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],mem[3]
6668 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,2,2,3]
6669 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,1,1,0,7,5,5,4]
6670 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
6671 ; AVX2-FCP-NEXT: vmovdqa %ymm8, %ymm13
6672 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm2 = ymm8[0,2],ymm12[1,3],ymm8[4,6],ymm12[5,7]
6673 ; AVX2-FCP-NEXT: vbroadcastss 656(%rdi), %ymm3
6674 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
6675 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
6676 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6677 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0],ymm7[1],ymm6[2,3,4,5,6,7]
6678 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],mem[3]
6679 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
6680 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,1,1,0,7,5,5,4]
6681 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
6682 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm5[0,2],ymm4[1,3],ymm5[4,6],ymm4[5,7]
6683 ; AVX2-FCP-NEXT: vmovdqa %ymm4, %ymm8
6684 ; AVX2-FCP-NEXT: vbroadcastss 432(%rdi), %ymm2
6685 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
6686 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6687 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6688 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6689 ; AVX2-FCP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6690 ; AVX2-FCP-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
6691 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm10[0,1,2],mem[3]
6692 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
6693 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
6694 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
6695 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6696 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
6697 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,2],ymm5[1,3],ymm6[4,6],ymm5[5,7]
6698 ; AVX2-FCP-NEXT: vbroadcastss 880(%rdi), %ymm2
6699 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
6700 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6701 ; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
6702 ; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4,3,0,0]
6703 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6704 ; AVX2-FCP-NEXT: vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6705 ; AVX2-FCP-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
6706 ; AVX2-FCP-NEXT: vpermd %ymm0, %ymm1, %ymm0
6707 ; AVX2-FCP-NEXT: vpbroadcastd 548(%rdi), %xmm2
6708 ; AVX2-FCP-NEXT: vmovdqa 512(%rdi), %xmm14
6709 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm14[0,1,2],xmm2[3]
6710 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
6711 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [0,7,0,7,0,7,0,7]
6712 ; AVX2-FCP-NEXT: vpermd %ymm13, %ymm11, %ymm2
6713 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm12[6,7]
6714 ; AVX2-FCP-NEXT: vmovaps %ymm12, %ymm13
6715 ; AVX2-FCP-NEXT: vpbroadcastd 660(%rdi), %ymm3
6716 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
6717 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
6718 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6719 ; AVX2-FCP-NEXT: vpbroadcastd 100(%rdi), %xmm2
6720 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %xmm0
6721 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3]
6722 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
6723 ; AVX2-FCP-NEXT: vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
6724 ; AVX2-FCP-NEXT: # ymm3 = mem[0,1,2,3],ymm3[4,5,6,7]
6725 ; AVX2-FCP-NEXT: vpermd %ymm3, %ymm1, %ymm3
6726 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
6727 ; AVX2-FCP-NEXT: vmovaps %ymm15, %ymm10
6728 ; AVX2-FCP-NEXT: vpermd %ymm15, %ymm11, %ymm3
6729 ; AVX2-FCP-NEXT: vmovaps %ymm9, %ymm7
6730 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm9[6,7]
6731 ; AVX2-FCP-NEXT: vpbroadcastd 212(%rdi), %ymm4
6732 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
6733 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
6734 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6735 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
6736 ; AVX2-FCP-NEXT: vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
6737 ; AVX2-FCP-NEXT: # ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
6738 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm3
6739 ; AVX2-FCP-NEXT: vpbroadcastd 324(%rdi), %xmm4
6740 ; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %xmm2
6741 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm2[0,1,2],xmm4[3]
6742 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
6743 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
6744 ; AVX2-FCP-NEXT: vpermd %ymm9, %ymm11, %ymm4
6745 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm8[6,7]
6746 ; AVX2-FCP-NEXT: vmovdqa %ymm8, %ymm15
6747 ; AVX2-FCP-NEXT: vpbroadcastd 436(%rdi), %ymm8
6748 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm8[7]
6749 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
6750 ; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6751 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
6752 ; AVX2-FCP-NEXT: vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
6753 ; AVX2-FCP-NEXT: # ymm3 = mem[0,1,2,3],ymm3[4,5,6,7]
6754 ; AVX2-FCP-NEXT: vpermd %ymm3, %ymm1, %ymm1
6755 ; AVX2-FCP-NEXT: vpbroadcastd 772(%rdi), %xmm4
6756 ; AVX2-FCP-NEXT: vmovdqa 736(%rdi), %xmm3
6757 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm3[0,1,2],xmm4[3]
6758 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3]
6759 ; AVX2-FCP-NEXT: vpermd %ymm6, %ymm11, %ymm4
6760 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
6761 ; AVX2-FCP-NEXT: vmovaps %ymm5, %ymm12
6762 ; AVX2-FCP-NEXT: vpbroadcastd 884(%rdi), %ymm8
6763 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm8[7]
6764 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
6765 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6766 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0],ymm10[1],ymm7[2,3,4],ymm10[5],ymm7[6,7]
6767 ; AVX2-FCP-NEXT: vmovaps {{.*#+}} ymm4 = [1,0,3,3,1,0,7,7]
6768 ; AVX2-FCP-NEXT: vpermps %ymm1, %ymm4, %ymm1
6769 ; AVX2-FCP-NEXT: vbroadcastss 216(%rdi), %ymm6
6770 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm6[7]
6771 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %xmm6
6772 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm6[0,1,2],xmm0[3]
6773 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
6774 ; AVX2-FCP-NEXT: vpshufd $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
6775 ; AVX2-FCP-NEXT: # ymm7 = mem[1,0,2,3,5,4,6,7]
6776 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
6777 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm7[0,1],xmm0[2,3]
6778 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
6779 ; AVX2-FCP-NEXT: vmovdqa 544(%rdi), %xmm8
6780 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm8[0,1,2],xmm14[3]
6781 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,2]
6782 ; AVX2-FCP-NEXT: vpshufd $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
6783 ; AVX2-FCP-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
6784 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
6785 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
6786 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm5 # 32-byte Folded Reload
6787 ; AVX2-FCP-NEXT: # ymm5 = ymm13[0],mem[1],ymm13[2,3,4],mem[5],ymm13[6,7]
6788 ; AVX2-FCP-NEXT: vpermps %ymm5, %ymm4, %ymm5
6789 ; AVX2-FCP-NEXT: vbroadcastss 664(%rdi), %ymm7
6790 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm7[7]
6791 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
6792 ; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %xmm13
6793 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm13[0,1,2],xmm2[3]
6794 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
6795 ; AVX2-FCP-NEXT: vpshufd $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
6796 ; AVX2-FCP-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
6797 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
6798 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
6799 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm15[0],ymm9[1],ymm15[2,3,4],ymm9[5],ymm15[6,7]
6800 ; AVX2-FCP-NEXT: vpermps %ymm5, %ymm4, %ymm5
6801 ; AVX2-FCP-NEXT: vbroadcastss 440(%rdi), %ymm7
6802 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm7[7]
6803 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm2[0,1,2,3],ymm5[4,5,6,7]
6804 ; AVX2-FCP-NEXT: vmovdqa 768(%rdi), %xmm2
6805 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm2[0,1,2],xmm3[3]
6806 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,2]
6807 ; AVX2-FCP-NEXT: vpshufd $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
6808 ; AVX2-FCP-NEXT: # ymm7 = mem[1,0,2,3,5,4,6,7]
6809 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
6810 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm7[0,1],xmm3[2,3]
6811 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm7 # 32-byte Folded Reload
6812 ; AVX2-FCP-NEXT: # ymm7 = ymm12[0],mem[1],ymm12[2,3,4],mem[5],ymm12[6,7]
6813 ; AVX2-FCP-NEXT: vpermps %ymm7, %ymm4, %ymm4
6814 ; AVX2-FCP-NEXT: vbroadcastss 888(%rdi), %ymm7
6815 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm7[7]
6816 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm4[4,5,6,7]
6817 ; AVX2-FCP-NEXT: vbroadcastss 584(%rdi), %xmm3
6818 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
6819 ; AVX2-FCP-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
6820 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
6821 ; AVX2-FCP-NEXT: vpermd 640(%rdi), %ymm11, %ymm4
6822 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
6823 ; AVX2-FCP-NEXT: vpbroadcastd 528(%rdi), %ymm4
6824 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1,2],xmm8[3]
6825 ; AVX2-FCP-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
6826 ; AVX2-FCP-NEXT: # ymm8 = mem[2,3,2,3,6,7,6,7]
6827 ; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
6828 ; AVX2-FCP-NEXT: # ymm8 = ymm8[0],mem[1],ymm8[2,3,4],mem[5],ymm8[6,7]
6829 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm8
6830 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm8[0,1],xmm4[2,3]
6831 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm3[4,5,6,7]
6832 ; AVX2-FCP-NEXT: vbroadcastss 808(%rdi), %xmm3
6833 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
6834 ; AVX2-FCP-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
6835 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
6836 ; AVX2-FCP-NEXT: vpermd 864(%rdi), %ymm11, %ymm4
6837 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
6838 ; AVX2-FCP-NEXT: vpbroadcastd 752(%rdi), %ymm4
6839 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[3]
6840 ; AVX2-FCP-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
6841 ; AVX2-FCP-NEXT: # ymm4 = mem[2,3,2,3,6,7,6,7]
6842 ; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
6843 ; AVX2-FCP-NEXT: # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
6844 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm4
6845 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
6846 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
6847 ; AVX2-FCP-NEXT: vbroadcastss 136(%rdi), %xmm3
6848 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
6849 ; AVX2-FCP-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
6850 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
6851 ; AVX2-FCP-NEXT: vpermd 192(%rdi), %ymm11, %ymm4
6852 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
6853 ; AVX2-FCP-NEXT: vpbroadcastd 80(%rdi), %ymm4
6854 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1,2],xmm6[3]
6855 ; AVX2-FCP-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
6856 ; AVX2-FCP-NEXT: # ymm6 = mem[2,3,2,3,6,7,6,7]
6857 ; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
6858 ; AVX2-FCP-NEXT: # ymm6 = ymm6[0],mem[1],ymm6[2,3,4],mem[5],ymm6[6,7]
6859 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
6860 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
6861 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
6862 ; AVX2-FCP-NEXT: vbroadcastss 360(%rdi), %xmm4
6863 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
6864 ; AVX2-FCP-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
6865 ; AVX2-FCP-NEXT: vpermd 416(%rdi), %ymm11, %ymm6
6866 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
6867 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
6868 ; AVX2-FCP-NEXT: vpbroadcastd 304(%rdi), %ymm6
6869 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm13[3]
6870 ; AVX2-FCP-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
6871 ; AVX2-FCP-NEXT: # ymm10 = mem[2,3,2,3,6,7,6,7]
6872 ; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
6873 ; AVX2-FCP-NEXT: # ymm10 = ymm10[0],mem[1],ymm10[2,3,4],mem[5],ymm10[6,7]
6874 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm10, %xmm10
6875 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm10[0,1],xmm6[2,3]
6876 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
6877 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6878 ; AVX2-FCP-NEXT: vmovaps %ymm6, 96(%rsi)
6879 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6880 ; AVX2-FCP-NEXT: vmovaps %ymm6, 32(%rsi)
6881 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6882 ; AVX2-FCP-NEXT: vmovaps %ymm6, 64(%rsi)
6883 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6884 ; AVX2-FCP-NEXT: vmovaps %ymm6, (%rsi)
6885 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6886 ; AVX2-FCP-NEXT: vmovaps %ymm6, 96(%rdx)
6887 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6888 ; AVX2-FCP-NEXT: vmovaps %ymm6, 32(%rdx)
6889 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6890 ; AVX2-FCP-NEXT: vmovaps %ymm6, 64(%rdx)
6891 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6892 ; AVX2-FCP-NEXT: vmovaps %ymm6, (%rdx)
6893 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6894 ; AVX2-FCP-NEXT: vmovaps %ymm6, 32(%rcx)
6895 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6896 ; AVX2-FCP-NEXT: vmovaps %ymm6, 96(%rcx)
6897 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6898 ; AVX2-FCP-NEXT: vmovaps %ymm6, 64(%rcx)
6899 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6900 ; AVX2-FCP-NEXT: vmovaps %ymm6, (%rcx)
6901 ; AVX2-FCP-NEXT: vmovups (%rsp), %ymm6 # 32-byte Reload
6902 ; AVX2-FCP-NEXT: vmovaps %ymm6, 96(%r8)
6903 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6904 ; AVX2-FCP-NEXT: vmovaps %ymm6, 32(%r8)
6905 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6906 ; AVX2-FCP-NEXT: vmovaps %ymm6, 64(%r8)
6907 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6908 ; AVX2-FCP-NEXT: vmovaps %ymm6, (%r8)
6909 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6910 ; AVX2-FCP-NEXT: vmovaps %ymm6, 96(%r9)
6911 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6912 ; AVX2-FCP-NEXT: vmovaps %ymm6, 32(%r9)
6913 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6914 ; AVX2-FCP-NEXT: vmovaps %ymm6, (%r9)
6915 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6916 ; AVX2-FCP-NEXT: vmovaps %ymm6, 64(%r9)
6917 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
6918 ; AVX2-FCP-NEXT: vmovaps %ymm7, 96(%rax)
6919 ; AVX2-FCP-NEXT: vmovaps %ymm5, 32(%rax)
6920 ; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%rax)
6921 ; AVX2-FCP-NEXT: vmovaps %ymm0, (%rax)
6922 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
6923 ; AVX2-FCP-NEXT: vmovaps %ymm4, 32(%rax)
6924 ; AVX2-FCP-NEXT: vmovaps %ymm3, (%rax)
6925 ; AVX2-FCP-NEXT: vmovaps %ymm2, 96(%rax)
6926 ; AVX2-FCP-NEXT: vmovaps %ymm8, 64(%rax)
6927 ; AVX2-FCP-NEXT: addq $1224, %rsp # imm = 0x4C8
6928 ; AVX2-FCP-NEXT: vzeroupper
6929 ; AVX2-FCP-NEXT: retq
6930 ;
6931 ; AVX512-LABEL: load_i32_stride7_vf32:
6932 ; AVX512: # %bb.0:
6933 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
6934 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
6935 ; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm1
6936 ; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm0
6937 ; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm4
6938 ; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm2
6939 ; AVX512-NEXT: vmovdqa64 832(%rdi), %zmm5
6940 ; AVX512-NEXT: vmovdqa64 768(%rdi), %zmm6
6941 ; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm3
6942 ; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm13
6943 ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm15
6944 ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm9
6945 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm10
6946 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm11
6947 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm14
6948 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm12
6949 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
6950 ; AVX512-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
6951 ; AVX512-NEXT: vmovdqa64 %zmm12, %zmm17
6952 ; AVX512-NEXT: vpermt2d %zmm14, %zmm16, %zmm17
6953 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,7,14,21,28,0,0,0]
6954 ; AVX512-NEXT: vmovdqa64 %zmm10, %zmm8
6955 ; AVX512-NEXT: vpermt2d %zmm11, %zmm7, %zmm8
6956 ; AVX512-NEXT: movw $992, %di # imm = 0x3E0
6957 ; AVX512-NEXT: kmovw %edi, %k1
6958 ; AVX512-NEXT: vmovdqa32 %zmm17, %zmm8 {%k1}
6959 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm17 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
6960 ; AVX512-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
6961 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm18
6962 ; AVX512-NEXT: vpermt2d %zmm15, %zmm17, %zmm18
6963 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
6964 ; AVX512-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
6965 ; AVX512-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
6966 ; AVX512-NEXT: movb $-32, %dil
6967 ; AVX512-NEXT: kmovw %edi, %k2
6968 ; AVX512-NEXT: vmovdqa64 %zmm18, %zmm8 {%k2}
6969 ; AVX512-NEXT: vpermi2d %zmm6, %zmm3, %zmm17
6970 ; AVX512-NEXT: vpermt2d %zmm5, %zmm19, %zmm17
6971 ; AVX512-NEXT: vpermi2d %zmm4, %zmm2, %zmm16
6972 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
6973 ; AVX512-NEXT: vmovdqa32 %zmm16, %zmm7 {%k1}
6974 ; AVX512-NEXT: vmovdqa64 %zmm17, %zmm7 {%k2}
6975 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
6976 ; AVX512-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
6977 ; AVX512-NEXT: vmovdqa64 %zmm14, %zmm19
6978 ; AVX512-NEXT: vpermt2d %zmm12, %zmm18, %zmm19
6979 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm16 = [1,8,15,22,29,0,0,0]
6980 ; AVX512-NEXT: vmovdqa64 %zmm10, %zmm17
6981 ; AVX512-NEXT: vpermt2d %zmm11, %zmm16, %zmm17
6982 ; AVX512-NEXT: movw $480, %di # imm = 0x1E0
6983 ; AVX512-NEXT: kmovw %edi, %k2
6984 ; AVX512-NEXT: vmovdqa32 %zmm19, %zmm17 {%k2}
6985 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
6986 ; AVX512-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
6987 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm20
6988 ; AVX512-NEXT: vpermt2d %zmm15, %zmm19, %zmm20
6989 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
6990 ; AVX512-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
6991 ; AVX512-NEXT: vpermt2d %zmm13, %zmm21, %zmm20
6992 ; AVX512-NEXT: movw $-512, %di # imm = 0xFE00
6993 ; AVX512-NEXT: kmovw %edi, %k1
6994 ; AVX512-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
6995 ; AVX512-NEXT: vpermi2d %zmm6, %zmm3, %zmm19
6996 ; AVX512-NEXT: vpermt2d %zmm5, %zmm21, %zmm19
6997 ; AVX512-NEXT: vmovdqa64 %zmm4, %zmm20
6998 ; AVX512-NEXT: vpermt2d %zmm2, %zmm18, %zmm20
6999 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm16
7000 ; AVX512-NEXT: vmovdqa32 %zmm20, %zmm16 {%k2}
7001 ; AVX512-NEXT: vmovdqa32 %zmm19, %zmm16 {%k1}
7002 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
7003 ; AVX512-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7004 ; AVX512-NEXT: vmovdqa64 %zmm14, %zmm22
7005 ; AVX512-NEXT: vpermt2d %zmm12, %zmm21, %zmm22
7006 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm19 = [18,25,0,7,14,0,0,0]
7007 ; AVX512-NEXT: vmovdqa64 %zmm11, %zmm20
7008 ; AVX512-NEXT: vpermt2d %zmm10, %zmm19, %zmm20
7009 ; AVX512-NEXT: vmovdqa32 %zmm22, %zmm20 {%k2}
7010 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
7011 ; AVX512-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
7012 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm23
7013 ; AVX512-NEXT: vpermt2d %zmm15, %zmm22, %zmm23
7014 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
7015 ; AVX512-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3]
7016 ; AVX512-NEXT: vpermt2d %zmm13, %zmm24, %zmm23
7017 ; AVX512-NEXT: vmovdqa32 %zmm23, %zmm20 {%k1}
7018 ; AVX512-NEXT: vpermi2d %zmm6, %zmm3, %zmm22
7019 ; AVX512-NEXT: vpermt2d %zmm5, %zmm24, %zmm22
7020 ; AVX512-NEXT: vmovdqa64 %zmm4, %zmm23
7021 ; AVX512-NEXT: vpermt2d %zmm2, %zmm21, %zmm23
7022 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
7023 ; AVX512-NEXT: vmovdqa32 %zmm23, %zmm19 {%k2}
7024 ; AVX512-NEXT: vmovdqa32 %zmm22, %zmm19 {%k1}
7025 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
7026 ; AVX512-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7027 ; AVX512-NEXT: vmovdqa64 %zmm14, %zmm25
7028 ; AVX512-NEXT: vpermt2d %zmm12, %zmm24, %zmm25
7029 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm22 = [19,26,1,8,15,0,0,0]
7030 ; AVX512-NEXT: vmovdqa64 %zmm11, %zmm23
7031 ; AVX512-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
7032 ; AVX512-NEXT: vmovdqa32 %zmm25, %zmm23 {%k2}
7033 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
7034 ; AVX512-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
7035 ; AVX512-NEXT: vmovdqa64 %zmm15, %zmm26
7036 ; AVX512-NEXT: vpermt2d %zmm9, %zmm25, %zmm26
7037 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
7038 ; AVX512-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7039 ; AVX512-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7040 ; AVX512-NEXT: vmovdqa32 %zmm26, %zmm23 {%k1}
7041 ; AVX512-NEXT: vpermi2d %zmm3, %zmm6, %zmm25
7042 ; AVX512-NEXT: vpermt2d %zmm5, %zmm27, %zmm25
7043 ; AVX512-NEXT: vpermi2d %zmm2, %zmm4, %zmm24
7044 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm22
7045 ; AVX512-NEXT: vmovdqa32 %zmm24, %zmm22 {%k2}
7046 ; AVX512-NEXT: vmovdqa32 %zmm25, %zmm22 {%k1}
7047 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
7048 ; AVX512-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
7049 ; AVX512-NEXT: vmovdqa64 %zmm15, %zmm25
7050 ; AVX512-NEXT: vpermt2d %zmm9, %zmm26, %zmm25
7051 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
7052 ; AVX512-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7053 ; AVX512-NEXT: vpermt2d %zmm13, %zmm27, %zmm25
7054 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
7055 ; AVX512-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7056 ; AVX512-NEXT: vmovdqa64 %zmm14, %zmm24
7057 ; AVX512-NEXT: vpermt2d %zmm12, %zmm28, %zmm24
7058 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm29 = [4,11,18,25]
7059 ; AVX512-NEXT: vmovdqa64 %zmm10, %zmm30
7060 ; AVX512-NEXT: vpermt2d %zmm11, %zmm29, %zmm30
7061 ; AVX512-NEXT: vinserti32x4 $0, %xmm30, %zmm24, %zmm24
7062 ; AVX512-NEXT: vmovdqa32 %zmm25, %zmm24 {%k1}
7063 ; AVX512-NEXT: vpermi2d %zmm3, %zmm6, %zmm26
7064 ; AVX512-NEXT: vpermt2d %zmm5, %zmm27, %zmm26
7065 ; AVX512-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7066 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm29
7067 ; AVX512-NEXT: vinserti32x4 $0, %xmm29, %zmm28, %zmm25
7068 ; AVX512-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
7069 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm26
7070 ; AVX512-NEXT: vpermt2d %zmm15, %zmm18, %zmm26
7071 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
7072 ; AVX512-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7073 ; AVX512-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7074 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
7075 ; AVX512-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7076 ; AVX512-NEXT: vmovdqa64 %zmm14, %zmm29
7077 ; AVX512-NEXT: vpermt2d %zmm12, %zmm28, %zmm29
7078 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm30 = [5,12,19,26]
7079 ; AVX512-NEXT: vmovdqa64 %zmm10, %zmm31
7080 ; AVX512-NEXT: vpermt2d %zmm11, %zmm30, %zmm31
7081 ; AVX512-NEXT: vinserti32x4 $0, %xmm31, %zmm29, %zmm29
7082 ; AVX512-NEXT: vmovdqa32 %zmm26, %zmm29 {%k1}
7083 ; AVX512-NEXT: vpermi2d %zmm6, %zmm3, %zmm18
7084 ; AVX512-NEXT: vpermt2d %zmm5, %zmm27, %zmm18
7085 ; AVX512-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7086 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm30
7087 ; AVX512-NEXT: vinserti32x4 $0, %xmm30, %zmm28, %zmm26
7088 ; AVX512-NEXT: vmovdqa32 %zmm18, %zmm26 {%k1}
7089 ; AVX512-NEXT: vpermt2d %zmm15, %zmm21, %zmm9
7090 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
7091 ; AVX512-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
7092 ; AVX512-NEXT: vpermt2d %zmm13, %zmm15, %zmm9
7093 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
7094 ; AVX512-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
7095 ; AVX512-NEXT: vpermt2d %zmm14, %zmm13, %zmm12
7096 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm14 = [6,13,20,27]
7097 ; AVX512-NEXT: vpermt2d %zmm11, %zmm14, %zmm10
7098 ; AVX512-NEXT: vinserti32x4 $0, %xmm10, %zmm12, %zmm10
7099 ; AVX512-NEXT: vmovdqa32 %zmm9, %zmm10 {%k1}
7100 ; AVX512-NEXT: vpermt2d %zmm6, %zmm21, %zmm3
7101 ; AVX512-NEXT: vpermt2d %zmm5, %zmm15, %zmm3
7102 ; AVX512-NEXT: vpermt2d %zmm4, %zmm13, %zmm2
7103 ; AVX512-NEXT: vpermt2d %zmm1, %zmm14, %zmm0
7104 ; AVX512-NEXT: vinserti32x4 $0, %xmm0, %zmm2, %zmm0
7105 ; AVX512-NEXT: vmovdqa32 %zmm3, %zmm0 {%k1}
7106 ; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rsi)
7107 ; AVX512-NEXT: vmovdqa64 %zmm8, (%rsi)
7108 ; AVX512-NEXT: vmovdqa64 %zmm16, 64(%rdx)
7109 ; AVX512-NEXT: vmovdqa64 %zmm17, (%rdx)
7110 ; AVX512-NEXT: vmovdqa64 %zmm19, 64(%rcx)
7111 ; AVX512-NEXT: vmovdqa64 %zmm20, (%rcx)
7112 ; AVX512-NEXT: vmovdqa64 %zmm22, 64(%r8)
7113 ; AVX512-NEXT: vmovdqa64 %zmm23, (%r8)
7114 ; AVX512-NEXT: vmovdqa64 %zmm25, 64(%r9)
7115 ; AVX512-NEXT: vmovdqa64 %zmm24, (%r9)
7116 ; AVX512-NEXT: vmovdqa64 %zmm26, 64(%r10)
7117 ; AVX512-NEXT: vmovdqa64 %zmm29, (%r10)
7118 ; AVX512-NEXT: vmovdqa64 %zmm0, 64(%rax)
7119 ; AVX512-NEXT: vmovdqa64 %zmm10, (%rax)
7120 ; AVX512-NEXT: vzeroupper
7121 ; AVX512-NEXT: retq
7122 ;
7123 ; AVX512-FCP-LABEL: load_i32_stride7_vf32:
7124 ; AVX512-FCP: # %bb.0:
7125 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
7126 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
7127 ; AVX512-FCP-NEXT: vmovdqa64 512(%rdi), %zmm1
7128 ; AVX512-FCP-NEXT: vmovdqa64 448(%rdi), %zmm0
7129 ; AVX512-FCP-NEXT: vmovdqa64 576(%rdi), %zmm4
7130 ; AVX512-FCP-NEXT: vmovdqa64 640(%rdi), %zmm2
7131 ; AVX512-FCP-NEXT: vmovdqa64 832(%rdi), %zmm5
7132 ; AVX512-FCP-NEXT: vmovdqa64 768(%rdi), %zmm6
7133 ; AVX512-FCP-NEXT: vmovdqa64 704(%rdi), %zmm3
7134 ; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm13
7135 ; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm15
7136 ; AVX512-FCP-NEXT: vmovdqa64 256(%rdi), %zmm9
7137 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm10
7138 ; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm11
7139 ; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm14
7140 ; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %zmm12
7141 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
7142 ; AVX512-FCP-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
7143 ; AVX512-FCP-NEXT: vmovdqa64 %zmm12, %zmm17
7144 ; AVX512-FCP-NEXT: vpermt2d %zmm14, %zmm16, %zmm17
7145 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,7,14,21,28,0,0,0]
7146 ; AVX512-FCP-NEXT: vmovdqa64 %zmm10, %zmm8
7147 ; AVX512-FCP-NEXT: vpermt2d %zmm11, %zmm7, %zmm8
7148 ; AVX512-FCP-NEXT: movw $992, %di # imm = 0x3E0
7149 ; AVX512-FCP-NEXT: kmovw %edi, %k1
7150 ; AVX512-FCP-NEXT: vmovdqa32 %zmm17, %zmm8 {%k1}
7151 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm17 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
7152 ; AVX512-FCP-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7153 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm18
7154 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm17, %zmm18
7155 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
7156 ; AVX512-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7157 ; AVX512-FCP-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
7158 ; AVX512-FCP-NEXT: movb $-32, %dil
7159 ; AVX512-FCP-NEXT: kmovw %edi, %k2
7160 ; AVX512-FCP-NEXT: vmovdqa64 %zmm18, %zmm8 {%k2}
7161 ; AVX512-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm17
7162 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm19, %zmm17
7163 ; AVX512-FCP-NEXT: vpermi2d %zmm4, %zmm2, %zmm16
7164 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
7165 ; AVX512-FCP-NEXT: vmovdqa32 %zmm16, %zmm7 {%k1}
7166 ; AVX512-FCP-NEXT: vmovdqa64 %zmm17, %zmm7 {%k2}
7167 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
7168 ; AVX512-FCP-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7169 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm19
7170 ; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm18, %zmm19
7171 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [1,8,15,22,29,0,0,0]
7172 ; AVX512-FCP-NEXT: vmovdqa64 %zmm10, %zmm17
7173 ; AVX512-FCP-NEXT: vpermt2d %zmm11, %zmm16, %zmm17
7174 ; AVX512-FCP-NEXT: movw $480, %di # imm = 0x1E0
7175 ; AVX512-FCP-NEXT: kmovw %edi, %k2
7176 ; AVX512-FCP-NEXT: vmovdqa32 %zmm19, %zmm17 {%k2}
7177 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
7178 ; AVX512-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7179 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm20
7180 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm19, %zmm20
7181 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
7182 ; AVX512-FCP-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
7183 ; AVX512-FCP-NEXT: vpermt2d %zmm13, %zmm21, %zmm20
7184 ; AVX512-FCP-NEXT: movw $-512, %di # imm = 0xFE00
7185 ; AVX512-FCP-NEXT: kmovw %edi, %k1
7186 ; AVX512-FCP-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
7187 ; AVX512-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm19
7188 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm21, %zmm19
7189 ; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm20
7190 ; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm18, %zmm20
7191 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm16
7192 ; AVX512-FCP-NEXT: vmovdqa32 %zmm20, %zmm16 {%k2}
7193 ; AVX512-FCP-NEXT: vmovdqa32 %zmm19, %zmm16 {%k1}
7194 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
7195 ; AVX512-FCP-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7196 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm22
7197 ; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm21, %zmm22
7198 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [18,25,0,7,14,0,0,0]
7199 ; AVX512-FCP-NEXT: vmovdqa64 %zmm11, %zmm20
7200 ; AVX512-FCP-NEXT: vpermt2d %zmm10, %zmm19, %zmm20
7201 ; AVX512-FCP-NEXT: vmovdqa32 %zmm22, %zmm20 {%k2}
7202 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
7203 ; AVX512-FCP-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
7204 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm23
7205 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm22, %zmm23
7206 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
7207 ; AVX512-FCP-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3]
7208 ; AVX512-FCP-NEXT: vpermt2d %zmm13, %zmm24, %zmm23
7209 ; AVX512-FCP-NEXT: vmovdqa32 %zmm23, %zmm20 {%k1}
7210 ; AVX512-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm22
7211 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm24, %zmm22
7212 ; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm23
7213 ; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm21, %zmm23
7214 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
7215 ; AVX512-FCP-NEXT: vmovdqa32 %zmm23, %zmm19 {%k2}
7216 ; AVX512-FCP-NEXT: vmovdqa32 %zmm22, %zmm19 {%k1}
7217 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
7218 ; AVX512-FCP-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7219 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm25
7220 ; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm24, %zmm25
7221 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm22 = [19,26,1,8,15,0,0,0]
7222 ; AVX512-FCP-NEXT: vmovdqa64 %zmm11, %zmm23
7223 ; AVX512-FCP-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
7224 ; AVX512-FCP-NEXT: vmovdqa32 %zmm25, %zmm23 {%k2}
7225 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
7226 ; AVX512-FCP-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
7227 ; AVX512-FCP-NEXT: vmovdqa64 %zmm15, %zmm26
7228 ; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm25, %zmm26
7229 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
7230 ; AVX512-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7231 ; AVX512-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7232 ; AVX512-FCP-NEXT: vmovdqa32 %zmm26, %zmm23 {%k1}
7233 ; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm6, %zmm25
7234 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm25
7235 ; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm24
7236 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm22
7237 ; AVX512-FCP-NEXT: vmovdqa32 %zmm24, %zmm22 {%k2}
7238 ; AVX512-FCP-NEXT: vmovdqa32 %zmm25, %zmm22 {%k1}
7239 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
7240 ; AVX512-FCP-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
7241 ; AVX512-FCP-NEXT: vmovdqa64 %zmm15, %zmm25
7242 ; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm26, %zmm25
7243 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
7244 ; AVX512-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7245 ; AVX512-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm25
7246 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
7247 ; AVX512-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7248 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm24
7249 ; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm28, %zmm24
7250 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm29 = [4,11,18,25]
7251 ; AVX512-FCP-NEXT: vmovdqa64 %zmm10, %zmm30
7252 ; AVX512-FCP-NEXT: vpermt2d %zmm11, %zmm29, %zmm30
7253 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm30, %zmm24, %zmm24
7254 ; AVX512-FCP-NEXT: vmovdqa32 %zmm25, %zmm24 {%k1}
7255 ; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm6, %zmm26
7256 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm26
7257 ; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7258 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm29
7259 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm29, %zmm28, %zmm25
7260 ; AVX512-FCP-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
7261 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm26
7262 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm18, %zmm26
7263 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
7264 ; AVX512-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7265 ; AVX512-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7266 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
7267 ; AVX512-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7268 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm29
7269 ; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm28, %zmm29
7270 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm30 = [5,12,19,26]
7271 ; AVX512-FCP-NEXT: vmovdqa64 %zmm10, %zmm31
7272 ; AVX512-FCP-NEXT: vpermt2d %zmm11, %zmm30, %zmm31
7273 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm31, %zmm29, %zmm29
7274 ; AVX512-FCP-NEXT: vmovdqa32 %zmm26, %zmm29 {%k1}
7275 ; AVX512-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm18
7276 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm18
7277 ; AVX512-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7278 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm30
7279 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm30, %zmm28, %zmm26
7280 ; AVX512-FCP-NEXT: vmovdqa32 %zmm18, %zmm26 {%k1}
7281 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm21, %zmm9
7282 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
7283 ; AVX512-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
7284 ; AVX512-FCP-NEXT: vpermt2d %zmm13, %zmm15, %zmm9
7285 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
7286 ; AVX512-FCP-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
7287 ; AVX512-FCP-NEXT: vpermt2d %zmm14, %zmm13, %zmm12
7288 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm14 = [6,13,20,27]
7289 ; AVX512-FCP-NEXT: vpermt2d %zmm11, %zmm14, %zmm10
7290 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm10, %zmm12, %zmm10
7291 ; AVX512-FCP-NEXT: vmovdqa32 %zmm9, %zmm10 {%k1}
7292 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm21, %zmm3
7293 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm15, %zmm3
7294 ; AVX512-FCP-NEXT: vpermt2d %zmm4, %zmm13, %zmm2
7295 ; AVX512-FCP-NEXT: vpermt2d %zmm1, %zmm14, %zmm0
7296 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm0, %zmm2, %zmm0
7297 ; AVX512-FCP-NEXT: vmovdqa32 %zmm3, %zmm0 {%k1}
7298 ; AVX512-FCP-NEXT: vmovdqa64 %zmm7, 64(%rsi)
7299 ; AVX512-FCP-NEXT: vmovdqa64 %zmm8, (%rsi)
7300 ; AVX512-FCP-NEXT: vmovdqa64 %zmm16, 64(%rdx)
7301 ; AVX512-FCP-NEXT: vmovdqa64 %zmm17, (%rdx)
7302 ; AVX512-FCP-NEXT: vmovdqa64 %zmm19, 64(%rcx)
7303 ; AVX512-FCP-NEXT: vmovdqa64 %zmm20, (%rcx)
7304 ; AVX512-FCP-NEXT: vmovdqa64 %zmm22, 64(%r8)
7305 ; AVX512-FCP-NEXT: vmovdqa64 %zmm23, (%r8)
7306 ; AVX512-FCP-NEXT: vmovdqa64 %zmm25, 64(%r9)
7307 ; AVX512-FCP-NEXT: vmovdqa64 %zmm24, (%r9)
7308 ; AVX512-FCP-NEXT: vmovdqa64 %zmm26, 64(%r10)
7309 ; AVX512-FCP-NEXT: vmovdqa64 %zmm29, (%r10)
7310 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
7311 ; AVX512-FCP-NEXT: vmovdqa64 %zmm10, (%rax)
7312 ; AVX512-FCP-NEXT: vzeroupper
7313 ; AVX512-FCP-NEXT: retq
7314 ;
7315 ; AVX512DQ-LABEL: load_i32_stride7_vf32:
7316 ; AVX512DQ: # %bb.0:
7317 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
7318 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
7319 ; AVX512DQ-NEXT: vmovdqa64 512(%rdi), %zmm1
7320 ; AVX512DQ-NEXT: vmovdqa64 448(%rdi), %zmm0
7321 ; AVX512DQ-NEXT: vmovdqa64 576(%rdi), %zmm4
7322 ; AVX512DQ-NEXT: vmovdqa64 640(%rdi), %zmm2
7323 ; AVX512DQ-NEXT: vmovdqa64 832(%rdi), %zmm5
7324 ; AVX512DQ-NEXT: vmovdqa64 768(%rdi), %zmm6
7325 ; AVX512DQ-NEXT: vmovdqa64 704(%rdi), %zmm3
7326 ; AVX512DQ-NEXT: vmovdqa64 384(%rdi), %zmm13
7327 ; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %zmm15
7328 ; AVX512DQ-NEXT: vmovdqa64 256(%rdi), %zmm9
7329 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm10
7330 ; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm11
7331 ; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm14
7332 ; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %zmm12
7333 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
7334 ; AVX512DQ-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
7335 ; AVX512DQ-NEXT: vmovdqa64 %zmm12, %zmm17
7336 ; AVX512DQ-NEXT: vpermt2d %zmm14, %zmm16, %zmm17
7337 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,7,14,21,28,0,0,0]
7338 ; AVX512DQ-NEXT: vmovdqa64 %zmm10, %zmm8
7339 ; AVX512DQ-NEXT: vpermt2d %zmm11, %zmm7, %zmm8
7340 ; AVX512DQ-NEXT: movw $992, %di # imm = 0x3E0
7341 ; AVX512DQ-NEXT: kmovw %edi, %k1
7342 ; AVX512DQ-NEXT: vmovdqa32 %zmm17, %zmm8 {%k1}
7343 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm17 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
7344 ; AVX512DQ-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7345 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm18
7346 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm17, %zmm18
7347 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
7348 ; AVX512DQ-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7349 ; AVX512DQ-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
7350 ; AVX512DQ-NEXT: movb $-32, %dil
7351 ; AVX512DQ-NEXT: kmovw %edi, %k2
7352 ; AVX512DQ-NEXT: vmovdqa64 %zmm18, %zmm8 {%k2}
7353 ; AVX512DQ-NEXT: vpermi2d %zmm6, %zmm3, %zmm17
7354 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm19, %zmm17
7355 ; AVX512DQ-NEXT: vpermi2d %zmm4, %zmm2, %zmm16
7356 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
7357 ; AVX512DQ-NEXT: vmovdqa32 %zmm16, %zmm7 {%k1}
7358 ; AVX512DQ-NEXT: vmovdqa64 %zmm17, %zmm7 {%k2}
7359 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
7360 ; AVX512DQ-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7361 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm19
7362 ; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm18, %zmm19
7363 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm16 = [1,8,15,22,29,0,0,0]
7364 ; AVX512DQ-NEXT: vmovdqa64 %zmm10, %zmm17
7365 ; AVX512DQ-NEXT: vpermt2d %zmm11, %zmm16, %zmm17
7366 ; AVX512DQ-NEXT: movw $480, %di # imm = 0x1E0
7367 ; AVX512DQ-NEXT: kmovw %edi, %k2
7368 ; AVX512DQ-NEXT: vmovdqa32 %zmm19, %zmm17 {%k2}
7369 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
7370 ; AVX512DQ-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7371 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm20
7372 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm19, %zmm20
7373 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
7374 ; AVX512DQ-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
7375 ; AVX512DQ-NEXT: vpermt2d %zmm13, %zmm21, %zmm20
7376 ; AVX512DQ-NEXT: movw $-512, %di # imm = 0xFE00
7377 ; AVX512DQ-NEXT: kmovw %edi, %k1
7378 ; AVX512DQ-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
7379 ; AVX512DQ-NEXT: vpermi2d %zmm6, %zmm3, %zmm19
7380 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm21, %zmm19
7381 ; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm20
7382 ; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm18, %zmm20
7383 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm16
7384 ; AVX512DQ-NEXT: vmovdqa32 %zmm20, %zmm16 {%k2}
7385 ; AVX512DQ-NEXT: vmovdqa32 %zmm19, %zmm16 {%k1}
7386 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
7387 ; AVX512DQ-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7388 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm22
7389 ; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm21, %zmm22
7390 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm19 = [18,25,0,7,14,0,0,0]
7391 ; AVX512DQ-NEXT: vmovdqa64 %zmm11, %zmm20
7392 ; AVX512DQ-NEXT: vpermt2d %zmm10, %zmm19, %zmm20
7393 ; AVX512DQ-NEXT: vmovdqa32 %zmm22, %zmm20 {%k2}
7394 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
7395 ; AVX512DQ-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
7396 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm23
7397 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm22, %zmm23
7398 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
7399 ; AVX512DQ-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3]
7400 ; AVX512DQ-NEXT: vpermt2d %zmm13, %zmm24, %zmm23
7401 ; AVX512DQ-NEXT: vmovdqa32 %zmm23, %zmm20 {%k1}
7402 ; AVX512DQ-NEXT: vpermi2d %zmm6, %zmm3, %zmm22
7403 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm24, %zmm22
7404 ; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm23
7405 ; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm21, %zmm23
7406 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
7407 ; AVX512DQ-NEXT: vmovdqa32 %zmm23, %zmm19 {%k2}
7408 ; AVX512DQ-NEXT: vmovdqa32 %zmm22, %zmm19 {%k1}
7409 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
7410 ; AVX512DQ-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7411 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm25
7412 ; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm24, %zmm25
7413 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm22 = [19,26,1,8,15,0,0,0]
7414 ; AVX512DQ-NEXT: vmovdqa64 %zmm11, %zmm23
7415 ; AVX512DQ-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
7416 ; AVX512DQ-NEXT: vmovdqa32 %zmm25, %zmm23 {%k2}
7417 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
7418 ; AVX512DQ-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
7419 ; AVX512DQ-NEXT: vmovdqa64 %zmm15, %zmm26
7420 ; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm25, %zmm26
7421 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
7422 ; AVX512DQ-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7423 ; AVX512DQ-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7424 ; AVX512DQ-NEXT: vmovdqa32 %zmm26, %zmm23 {%k1}
7425 ; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm6, %zmm25
7426 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm27, %zmm25
7427 ; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm4, %zmm24
7428 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm22
7429 ; AVX512DQ-NEXT: vmovdqa32 %zmm24, %zmm22 {%k2}
7430 ; AVX512DQ-NEXT: vmovdqa32 %zmm25, %zmm22 {%k1}
7431 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
7432 ; AVX512DQ-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
7433 ; AVX512DQ-NEXT: vmovdqa64 %zmm15, %zmm25
7434 ; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm26, %zmm25
7435 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
7436 ; AVX512DQ-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7437 ; AVX512DQ-NEXT: vpermt2d %zmm13, %zmm27, %zmm25
7438 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
7439 ; AVX512DQ-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7440 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm24
7441 ; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm28, %zmm24
7442 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm29 = [4,11,18,25]
7443 ; AVX512DQ-NEXT: vmovdqa64 %zmm10, %zmm30
7444 ; AVX512DQ-NEXT: vpermt2d %zmm11, %zmm29, %zmm30
7445 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm30, %zmm24, %zmm24
7446 ; AVX512DQ-NEXT: vmovdqa32 %zmm25, %zmm24 {%k1}
7447 ; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm6, %zmm26
7448 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm27, %zmm26
7449 ; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7450 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm29
7451 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm29, %zmm28, %zmm25
7452 ; AVX512DQ-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
7453 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm26
7454 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm18, %zmm26
7455 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
7456 ; AVX512DQ-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7457 ; AVX512DQ-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7458 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
7459 ; AVX512DQ-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7460 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm29
7461 ; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm28, %zmm29
7462 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm30 = [5,12,19,26]
7463 ; AVX512DQ-NEXT: vmovdqa64 %zmm10, %zmm31
7464 ; AVX512DQ-NEXT: vpermt2d %zmm11, %zmm30, %zmm31
7465 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm31, %zmm29, %zmm29
7466 ; AVX512DQ-NEXT: vmovdqa32 %zmm26, %zmm29 {%k1}
7467 ; AVX512DQ-NEXT: vpermi2d %zmm6, %zmm3, %zmm18
7468 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm27, %zmm18
7469 ; AVX512DQ-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7470 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm30
7471 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm30, %zmm28, %zmm26
7472 ; AVX512DQ-NEXT: vmovdqa32 %zmm18, %zmm26 {%k1}
7473 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm21, %zmm9
7474 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
7475 ; AVX512DQ-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
7476 ; AVX512DQ-NEXT: vpermt2d %zmm13, %zmm15, %zmm9
7477 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
7478 ; AVX512DQ-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
7479 ; AVX512DQ-NEXT: vpermt2d %zmm14, %zmm13, %zmm12
7480 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm14 = [6,13,20,27]
7481 ; AVX512DQ-NEXT: vpermt2d %zmm11, %zmm14, %zmm10
7482 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm10, %zmm12, %zmm10
7483 ; AVX512DQ-NEXT: vmovdqa32 %zmm9, %zmm10 {%k1}
7484 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm21, %zmm3
7485 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm15, %zmm3
7486 ; AVX512DQ-NEXT: vpermt2d %zmm4, %zmm13, %zmm2
7487 ; AVX512DQ-NEXT: vpermt2d %zmm1, %zmm14, %zmm0
7488 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm0, %zmm2, %zmm0
7489 ; AVX512DQ-NEXT: vmovdqa32 %zmm3, %zmm0 {%k1}
7490 ; AVX512DQ-NEXT: vmovdqa64 %zmm7, 64(%rsi)
7491 ; AVX512DQ-NEXT: vmovdqa64 %zmm8, (%rsi)
7492 ; AVX512DQ-NEXT: vmovdqa64 %zmm16, 64(%rdx)
7493 ; AVX512DQ-NEXT: vmovdqa64 %zmm17, (%rdx)
7494 ; AVX512DQ-NEXT: vmovdqa64 %zmm19, 64(%rcx)
7495 ; AVX512DQ-NEXT: vmovdqa64 %zmm20, (%rcx)
7496 ; AVX512DQ-NEXT: vmovdqa64 %zmm22, 64(%r8)
7497 ; AVX512DQ-NEXT: vmovdqa64 %zmm23, (%r8)
7498 ; AVX512DQ-NEXT: vmovdqa64 %zmm25, 64(%r9)
7499 ; AVX512DQ-NEXT: vmovdqa64 %zmm24, (%r9)
7500 ; AVX512DQ-NEXT: vmovdqa64 %zmm26, 64(%r10)
7501 ; AVX512DQ-NEXT: vmovdqa64 %zmm29, (%r10)
7502 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, 64(%rax)
7503 ; AVX512DQ-NEXT: vmovdqa64 %zmm10, (%rax)
7504 ; AVX512DQ-NEXT: vzeroupper
7505 ; AVX512DQ-NEXT: retq
7506 ;
7507 ; AVX512DQ-FCP-LABEL: load_i32_stride7_vf32:
7508 ; AVX512DQ-FCP: # %bb.0:
7509 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
7510 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
7511 ; AVX512DQ-FCP-NEXT: vmovdqa64 512(%rdi), %zmm1
7512 ; AVX512DQ-FCP-NEXT: vmovdqa64 448(%rdi), %zmm0
7513 ; AVX512DQ-FCP-NEXT: vmovdqa64 576(%rdi), %zmm4
7514 ; AVX512DQ-FCP-NEXT: vmovdqa64 640(%rdi), %zmm2
7515 ; AVX512DQ-FCP-NEXT: vmovdqa64 832(%rdi), %zmm5
7516 ; AVX512DQ-FCP-NEXT: vmovdqa64 768(%rdi), %zmm6
7517 ; AVX512DQ-FCP-NEXT: vmovdqa64 704(%rdi), %zmm3
7518 ; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm13
7519 ; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm15
7520 ; AVX512DQ-FCP-NEXT: vmovdqa64 256(%rdi), %zmm9
7521 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm10
7522 ; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm11
7523 ; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm14
7524 ; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %zmm12
7525 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
7526 ; AVX512DQ-FCP-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
7527 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, %zmm17
7528 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm14, %zmm16, %zmm17
7529 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,7,14,21,28,0,0,0]
7530 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, %zmm8
7531 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm11, %zmm7, %zmm8
7532 ; AVX512DQ-FCP-NEXT: movw $992, %di # imm = 0x3E0
7533 ; AVX512DQ-FCP-NEXT: kmovw %edi, %k1
7534 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm17, %zmm8 {%k1}
7535 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm17 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
7536 ; AVX512DQ-FCP-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7537 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm18
7538 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm17, %zmm18
7539 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
7540 ; AVX512DQ-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7541 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
7542 ; AVX512DQ-FCP-NEXT: movb $-32, %dil
7543 ; AVX512DQ-FCP-NEXT: kmovw %edi, %k2
7544 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm18, %zmm8 {%k2}
7545 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm17
7546 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm19, %zmm17
7547 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm4, %zmm2, %zmm16
7548 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
7549 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm16, %zmm7 {%k1}
7550 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, %zmm7 {%k2}
7551 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
7552 ; AVX512DQ-FCP-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7553 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm19
7554 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm18, %zmm19
7555 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [1,8,15,22,29,0,0,0]
7556 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, %zmm17
7557 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm11, %zmm16, %zmm17
7558 ; AVX512DQ-FCP-NEXT: movw $480, %di # imm = 0x1E0
7559 ; AVX512DQ-FCP-NEXT: kmovw %edi, %k2
7560 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm19, %zmm17 {%k2}
7561 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
7562 ; AVX512DQ-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7563 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm20
7564 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm19, %zmm20
7565 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
7566 ; AVX512DQ-FCP-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
7567 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm13, %zmm21, %zmm20
7568 ; AVX512DQ-FCP-NEXT: movw $-512, %di # imm = 0xFE00
7569 ; AVX512DQ-FCP-NEXT: kmovw %edi, %k1
7570 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
7571 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm19
7572 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm21, %zmm19
7573 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm20
7574 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm18, %zmm20
7575 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm16
7576 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm20, %zmm16 {%k2}
7577 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm19, %zmm16 {%k1}
7578 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
7579 ; AVX512DQ-FCP-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7580 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm22
7581 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm21, %zmm22
7582 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [18,25,0,7,14,0,0,0]
7583 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, %zmm20
7584 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm10, %zmm19, %zmm20
7585 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm22, %zmm20 {%k2}
7586 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
7587 ; AVX512DQ-FCP-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
7588 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm23
7589 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm22, %zmm23
7590 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
7591 ; AVX512DQ-FCP-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3]
7592 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm13, %zmm24, %zmm23
7593 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm23, %zmm20 {%k1}
7594 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm22
7595 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm24, %zmm22
7596 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm23
7597 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm21, %zmm23
7598 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
7599 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm23, %zmm19 {%k2}
7600 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm22, %zmm19 {%k1}
7601 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
7602 ; AVX512DQ-FCP-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7603 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm25
7604 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm24, %zmm25
7605 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm22 = [19,26,1,8,15,0,0,0]
7606 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, %zmm23
7607 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
7608 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm25, %zmm23 {%k2}
7609 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
7610 ; AVX512DQ-FCP-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
7611 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, %zmm26
7612 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm25, %zmm26
7613 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
7614 ; AVX512DQ-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7615 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7616 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm26, %zmm23 {%k1}
7617 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm6, %zmm25
7618 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm25
7619 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm24
7620 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm22
7621 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm24, %zmm22 {%k2}
7622 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm25, %zmm22 {%k1}
7623 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
7624 ; AVX512DQ-FCP-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
7625 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, %zmm25
7626 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm26, %zmm25
7627 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
7628 ; AVX512DQ-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7629 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm25
7630 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
7631 ; AVX512DQ-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7632 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm24
7633 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm28, %zmm24
7634 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm29 = [4,11,18,25]
7635 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, %zmm30
7636 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm11, %zmm29, %zmm30
7637 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm30, %zmm24, %zmm24
7638 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm25, %zmm24 {%k1}
7639 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm6, %zmm26
7640 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm26
7641 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7642 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm29
7643 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm29, %zmm28, %zmm25
7644 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
7645 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm26
7646 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm18, %zmm26
7647 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
7648 ; AVX512DQ-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7649 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7650 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
7651 ; AVX512DQ-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7652 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm29
7653 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm28, %zmm29
7654 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm30 = [5,12,19,26]
7655 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, %zmm31
7656 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm11, %zmm30, %zmm31
7657 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm31, %zmm29, %zmm29
7658 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm26, %zmm29 {%k1}
7659 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm18
7660 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm18
7661 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7662 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm30
7663 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm30, %zmm28, %zmm26
7664 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm18, %zmm26 {%k1}
7665 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm21, %zmm9
7666 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
7667 ; AVX512DQ-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
7668 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm13, %zmm15, %zmm9
7669 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
7670 ; AVX512DQ-FCP-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
7671 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm14, %zmm13, %zmm12
7672 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm14 = [6,13,20,27]
7673 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm11, %zmm14, %zmm10
7674 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm10, %zmm12, %zmm10
7675 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm9, %zmm10 {%k1}
7676 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm21, %zmm3
7677 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm15, %zmm3
7678 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm4, %zmm13, %zmm2
7679 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm1, %zmm14, %zmm0
7680 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm0, %zmm2, %zmm0
7681 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm3, %zmm0 {%k1}
7682 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, 64(%rsi)
7683 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, (%rsi)
7684 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, 64(%rdx)
7685 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, (%rdx)
7686 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, 64(%rcx)
7687 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, (%rcx)
7688 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, 64(%r8)
7689 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm23, (%r8)
7690 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm25, 64(%r9)
7691 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm24, (%r9)
7692 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, 64(%r10)
7693 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm29, (%r10)
7694 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
7695 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, (%rax)
7696 ; AVX512DQ-FCP-NEXT: vzeroupper
7697 ; AVX512DQ-FCP-NEXT: retq
7699 ; AVX512BW-LABEL: load_i32_stride7_vf32:
7700 ; AVX512BW: # %bb.0:
7701 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
7702 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
7703 ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm1
7704 ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm0
7705 ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm4
7706 ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm2
7707 ; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm5
7708 ; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm6
7709 ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm3
7710 ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm13
7711 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm15
7712 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm9
7713 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm10
7714 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm11
7715 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm14
7716 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm12
7717 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
7718 ; AVX512BW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
7719 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm17
7720 ; AVX512BW-NEXT: vpermt2d %zmm14, %zmm16, %zmm17
7721 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,7,14,21,28,0,0,0]
7722 ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm8
7723 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm7, %zmm8
7724 ; AVX512BW-NEXT: movw $992, %di # imm = 0x3E0
7725 ; AVX512BW-NEXT: kmovd %edi, %k1
7726 ; AVX512BW-NEXT: vmovdqa32 %zmm17, %zmm8 {%k1}
7727 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm17 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
7728 ; AVX512BW-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7729 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm18
7730 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm17, %zmm18
7731 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
7732 ; AVX512BW-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7733 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
7734 ; AVX512BW-NEXT: movb $-32, %dil
7735 ; AVX512BW-NEXT: kmovd %edi, %k2
7736 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm8 {%k2}
7737 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm3, %zmm17
7738 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm19, %zmm17
7739 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm2, %zmm16
7740 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
7741 ; AVX512BW-NEXT: vmovdqa32 %zmm16, %zmm7 {%k1}
7742 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm7 {%k2}
7743 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
7744 ; AVX512BW-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7745 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm19
7746 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm18, %zmm19
7747 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm16 = [1,8,15,22,29,0,0,0]
7748 ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm17
7749 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm16, %zmm17
7750 ; AVX512BW-NEXT: movw $480, %di # imm = 0x1E0
7751 ; AVX512BW-NEXT: kmovd %edi, %k2
7752 ; AVX512BW-NEXT: vmovdqa32 %zmm19, %zmm17 {%k2}
7753 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
7754 ; AVX512BW-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7755 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm20
7756 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm19, %zmm20
7757 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
7758 ; AVX512BW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
7759 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm21, %zmm20
7760 ; AVX512BW-NEXT: movw $-512, %di # imm = 0xFE00
7761 ; AVX512BW-NEXT: kmovd %edi, %k1
7762 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
7763 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm3, %zmm19
7764 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm21, %zmm19
7765 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm20
7766 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm18, %zmm20
7767 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm16
7768 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm16 {%k2}
7769 ; AVX512BW-NEXT: vmovdqa32 %zmm19, %zmm16 {%k1}
7770 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
7771 ; AVX512BW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7772 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm22
7773 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm21, %zmm22
7774 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm19 = [18,25,0,7,14,0,0,0]
7775 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm20
7776 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm19, %zmm20
7777 ; AVX512BW-NEXT: vmovdqa32 %zmm22, %zmm20 {%k2}
7778 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
7779 ; AVX512BW-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
7780 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm23
7781 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm22, %zmm23
7782 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
7783 ; AVX512BW-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3]
7784 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm24, %zmm23
7785 ; AVX512BW-NEXT: vmovdqa32 %zmm23, %zmm20 {%k1}
7786 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm3, %zmm22
7787 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm24, %zmm22
7788 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm23
7789 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm21, %zmm23
7790 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
7791 ; AVX512BW-NEXT: vmovdqa32 %zmm23, %zmm19 {%k2}
7792 ; AVX512BW-NEXT: vmovdqa32 %zmm22, %zmm19 {%k1}
7793 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
7794 ; AVX512BW-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7795 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm25
7796 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm24, %zmm25
7797 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm22 = [19,26,1,8,15,0,0,0]
7798 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm23
7799 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
7800 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm23 {%k2}
7801 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
7802 ; AVX512BW-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
7803 ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm26
7804 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm25, %zmm26
7805 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
7806 ; AVX512BW-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7807 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7808 ; AVX512BW-NEXT: vmovdqa32 %zmm26, %zmm23 {%k1}
7809 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm6, %zmm25
7810 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm27, %zmm25
7811 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm4, %zmm24
7812 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm22
7813 ; AVX512BW-NEXT: vmovdqa32 %zmm24, %zmm22 {%k2}
7814 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm22 {%k1}
7815 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
7816 ; AVX512BW-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
7817 ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm25
7818 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm26, %zmm25
7819 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
7820 ; AVX512BW-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7821 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm27, %zmm25
7822 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
7823 ; AVX512BW-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7824 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm24
7825 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm28, %zmm24
7826 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm29 = [4,11,18,25]
7827 ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm30
7828 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm29, %zmm30
7829 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm30, %zmm24, %zmm24
7830 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm24 {%k1}
7831 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm6, %zmm26
7832 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm27, %zmm26
7833 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7834 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm29
7835 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm29, %zmm28, %zmm25
7836 ; AVX512BW-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
7837 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm26
7838 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm18, %zmm26
7839 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
7840 ; AVX512BW-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7841 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
7842 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
7843 ; AVX512BW-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
7844 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm29
7845 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm28, %zmm29
7846 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm30 = [5,12,19,26]
7847 ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm31
7848 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm30, %zmm31
7849 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm31, %zmm29, %zmm29
7850 ; AVX512BW-NEXT: vmovdqa32 %zmm26, %zmm29 {%k1}
7851 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm3, %zmm18
7852 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm27, %zmm18
7853 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
7854 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm30
7855 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm30, %zmm28, %zmm26
7856 ; AVX512BW-NEXT: vmovdqa32 %zmm18, %zmm26 {%k1}
7857 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm21, %zmm9
7858 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
7859 ; AVX512BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
7860 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm15, %zmm9
7861 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
7862 ; AVX512BW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
7863 ; AVX512BW-NEXT: vpermt2d %zmm14, %zmm13, %zmm12
7864 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm14 = [6,13,20,27]
7865 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm14, %zmm10
7866 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm10, %zmm12, %zmm10
7867 ; AVX512BW-NEXT: vmovdqa32 %zmm9, %zmm10 {%k1}
7868 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm21, %zmm3
7869 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm15, %zmm3
7870 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm13, %zmm2
7871 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm14, %zmm0
7872 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm0, %zmm2, %zmm0
7873 ; AVX512BW-NEXT: vmovdqa32 %zmm3, %zmm0 {%k1}
7874 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 64(%rsi)
7875 ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%rsi)
7876 ; AVX512BW-NEXT: vmovdqa64 %zmm16, 64(%rdx)
7877 ; AVX512BW-NEXT: vmovdqa64 %zmm17, (%rdx)
7878 ; AVX512BW-NEXT: vmovdqa64 %zmm19, 64(%rcx)
7879 ; AVX512BW-NEXT: vmovdqa64 %zmm20, (%rcx)
7880 ; AVX512BW-NEXT: vmovdqa64 %zmm22, 64(%r8)
7881 ; AVX512BW-NEXT: vmovdqa64 %zmm23, (%r8)
7882 ; AVX512BW-NEXT: vmovdqa64 %zmm25, 64(%r9)
7883 ; AVX512BW-NEXT: vmovdqa64 %zmm24, (%r9)
7884 ; AVX512BW-NEXT: vmovdqa64 %zmm26, 64(%r10)
7885 ; AVX512BW-NEXT: vmovdqa64 %zmm29, (%r10)
7886 ; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
7887 ; AVX512BW-NEXT: vmovdqa64 %zmm10, (%rax)
7888 ; AVX512BW-NEXT: vzeroupper
7889 ; AVX512BW-NEXT: retq
7891 ; AVX512BW-FCP-LABEL: load_i32_stride7_vf32:
7892 ; AVX512BW-FCP: # %bb.0:
7893 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
7894 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
7895 ; AVX512BW-FCP-NEXT: vmovdqa64 512(%rdi), %zmm1
7896 ; AVX512BW-FCP-NEXT: vmovdqa64 448(%rdi), %zmm0
7897 ; AVX512BW-FCP-NEXT: vmovdqa64 576(%rdi), %zmm4
7898 ; AVX512BW-FCP-NEXT: vmovdqa64 640(%rdi), %zmm2
7899 ; AVX512BW-FCP-NEXT: vmovdqa64 832(%rdi), %zmm5
7900 ; AVX512BW-FCP-NEXT: vmovdqa64 768(%rdi), %zmm6
7901 ; AVX512BW-FCP-NEXT: vmovdqa64 704(%rdi), %zmm3
7902 ; AVX512BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm13
7903 ; AVX512BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm15
7904 ; AVX512BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm9
7905 ; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm10
7906 ; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm11
7907 ; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm14
7908 ; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm12
7909 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
7910 ; AVX512BW-FCP-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
7911 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm17
7912 ; AVX512BW-FCP-NEXT: vpermt2d %zmm14, %zmm16, %zmm17
7913 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,7,14,21,28,0,0,0]
7914 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm8
7915 ; AVX512BW-FCP-NEXT: vpermt2d %zmm11, %zmm7, %zmm8
7916 ; AVX512BW-FCP-NEXT: movw $992, %di # imm = 0x3E0
7917 ; AVX512BW-FCP-NEXT: kmovd %edi, %k1
7918 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm17, %zmm8 {%k1}
7919 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm17 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
7920 ; AVX512BW-FCP-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7921 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm18
7922 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm17, %zmm18
7923 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
7924 ; AVX512BW-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7925 ; AVX512BW-FCP-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
7926 ; AVX512BW-FCP-NEXT: movb $-32, %dil
7927 ; AVX512BW-FCP-NEXT: kmovd %edi, %k2
7928 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm18, %zmm8 {%k2}
7929 ; AVX512BW-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm17
7930 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm19, %zmm17
7931 ; AVX512BW-FCP-NEXT: vpermi2d %zmm4, %zmm2, %zmm16
7932 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
7933 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm16, %zmm7 {%k1}
7934 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm7 {%k2}
7935 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
7936 ; AVX512BW-FCP-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7937 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm19
7938 ; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm18, %zmm19
7939 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [1,8,15,22,29,0,0,0]
7940 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm17
7941 ; AVX512BW-FCP-NEXT: vpermt2d %zmm11, %zmm16, %zmm17
7942 ; AVX512BW-FCP-NEXT: movw $480, %di # imm = 0x1E0
7943 ; AVX512BW-FCP-NEXT: kmovd %edi, %k2
7944 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm19, %zmm17 {%k2}
7945 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
7946 ; AVX512BW-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
7947 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm20
7948 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm19, %zmm20
7949 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
7950 ; AVX512BW-FCP-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
7951 ; AVX512BW-FCP-NEXT: vpermt2d %zmm13, %zmm21, %zmm20
7952 ; AVX512BW-FCP-NEXT: movw $-512, %di # imm = 0xFE00
7953 ; AVX512BW-FCP-NEXT: kmovd %edi, %k1
7954 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
7955 ; AVX512BW-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm19
7956 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm21, %zmm19
7957 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm20
7958 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm18, %zmm20
7959 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm16
7960 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm20, %zmm16 {%k2}
7961 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm19, %zmm16 {%k1}
7962 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
7963 ; AVX512BW-FCP-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7964 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm22
7965 ; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm21, %zmm22
7966 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [18,25,0,7,14,0,0,0]
7967 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, %zmm20
7968 ; AVX512BW-FCP-NEXT: vpermt2d %zmm10, %zmm19, %zmm20
7969 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm22, %zmm20 {%k2}
7970 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
7971 ; AVX512BW-FCP-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
7972 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm23
7973 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm22, %zmm23
7974 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
7975 ; AVX512BW-FCP-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3]
7976 ; AVX512BW-FCP-NEXT: vpermt2d %zmm13, %zmm24, %zmm23
7977 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm23, %zmm20 {%k1}
7978 ; AVX512BW-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm22
7979 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm24, %zmm22
7980 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm23
7981 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm21, %zmm23
7982 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
7983 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm23, %zmm19 {%k2}
7984 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm22, %zmm19 {%k1}
7985 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
7986 ; AVX512BW-FCP-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
7987 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm25
7988 ; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm24, %zmm25
7989 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm22 = [19,26,1,8,15,0,0,0]
7990 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, %zmm23
7991 ; AVX512BW-FCP-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
7992 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm23 {%k2}
7993 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
7994 ; AVX512BW-FCP-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
7995 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm15, %zmm26
7996 ; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm25, %zmm26
7997 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
7998 ; AVX512BW-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
7999 ; AVX512BW-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
8000 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm26, %zmm23 {%k1}
8001 ; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm6, %zmm25
8002 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm25
8003 ; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm24
8004 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm22
8005 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm24, %zmm22 {%k2}
8006 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm22 {%k1}
8007 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
8008 ; AVX512BW-FCP-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
8009 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm15, %zmm25
8010 ; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm26, %zmm25
8011 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
8012 ; AVX512BW-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
8013 ; AVX512BW-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm25
8014 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
8015 ; AVX512BW-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
8016 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm24
8017 ; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm28, %zmm24
8018 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm29 = [4,11,18,25]
8019 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm30
8020 ; AVX512BW-FCP-NEXT: vpermt2d %zmm11, %zmm29, %zmm30
8021 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm30, %zmm24, %zmm24
8022 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm24 {%k1}
8023 ; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm6, %zmm26
8024 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm26
8025 ; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
8026 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm29
8027 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm29, %zmm28, %zmm25
8028 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
8029 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm26
8030 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm18, %zmm26
8031 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
8032 ; AVX512BW-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
8033 ; AVX512BW-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
8034 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
8035 ; AVX512BW-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
8036 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm29
8037 ; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm28, %zmm29
8038 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm30 = [5,12,19,26]
8039 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm31
8040 ; AVX512BW-FCP-NEXT: vpermt2d %zmm11, %zmm30, %zmm31
8041 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm31, %zmm29, %zmm29
8042 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm26, %zmm29 {%k1}
8043 ; AVX512BW-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm18
8044 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm18
8045 ; AVX512BW-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
8046 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm30
8047 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm30, %zmm28, %zmm26
8048 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm18, %zmm26 {%k1}
8049 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm21, %zmm9
8050 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
8051 ; AVX512BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
8052 ; AVX512BW-FCP-NEXT: vpermt2d %zmm13, %zmm15, %zmm9
8053 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
8054 ; AVX512BW-FCP-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
8055 ; AVX512BW-FCP-NEXT: vpermt2d %zmm14, %zmm13, %zmm12
8056 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm14 = [6,13,20,27]
8057 ; AVX512BW-FCP-NEXT: vpermt2d %zmm11, %zmm14, %zmm10
8058 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm10, %zmm12, %zmm10
8059 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm9, %zmm10 {%k1}
8060 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm21, %zmm3
8061 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm15, %zmm3
8062 ; AVX512BW-FCP-NEXT: vpermt2d %zmm4, %zmm13, %zmm2
8063 ; AVX512BW-FCP-NEXT: vpermt2d %zmm1, %zmm14, %zmm0
8064 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm0, %zmm2, %zmm0
8065 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm3, %zmm0 {%k1}
8066 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%rsi)
8067 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, (%rsi)
8068 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm16, 64(%rdx)
8069 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, (%rdx)
8070 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm19, 64(%rcx)
8071 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm20, (%rcx)
8072 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, 64(%r8)
8073 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm23, (%r8)
8074 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm25, 64(%r9)
8075 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm24, (%r9)
8076 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm26, 64(%r10)
8077 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm29, (%r10)
8078 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
8079 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, (%rax)
8080 ; AVX512BW-FCP-NEXT: vzeroupper
8081 ; AVX512BW-FCP-NEXT: retq
8083 ; AVX512DQ-BW-LABEL: load_i32_stride7_vf32:
8084 ; AVX512DQ-BW: # %bb.0:
8085 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
8086 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
8087 ; AVX512DQ-BW-NEXT: vmovdqa64 512(%rdi), %zmm1
8088 ; AVX512DQ-BW-NEXT: vmovdqa64 448(%rdi), %zmm0
8089 ; AVX512DQ-BW-NEXT: vmovdqa64 576(%rdi), %zmm4
8090 ; AVX512DQ-BW-NEXT: vmovdqa64 640(%rdi), %zmm2
8091 ; AVX512DQ-BW-NEXT: vmovdqa64 832(%rdi), %zmm5
8092 ; AVX512DQ-BW-NEXT: vmovdqa64 768(%rdi), %zmm6
8093 ; AVX512DQ-BW-NEXT: vmovdqa64 704(%rdi), %zmm3
8094 ; AVX512DQ-BW-NEXT: vmovdqa64 384(%rdi), %zmm13
8095 ; AVX512DQ-BW-NEXT: vmovdqa64 320(%rdi), %zmm15
8096 ; AVX512DQ-BW-NEXT: vmovdqa64 256(%rdi), %zmm9
8097 ; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm10
8098 ; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm11
8099 ; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm14
8100 ; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %zmm12
8101 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
8102 ; AVX512DQ-BW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
8103 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, %zmm17
8104 ; AVX512DQ-BW-NEXT: vpermt2d %zmm14, %zmm16, %zmm17
8105 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,7,14,21,28,0,0,0]
8106 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, %zmm8
8107 ; AVX512DQ-BW-NEXT: vpermt2d %zmm11, %zmm7, %zmm8
8108 ; AVX512DQ-BW-NEXT: movw $992, %di # imm = 0x3E0
8109 ; AVX512DQ-BW-NEXT: kmovd %edi, %k1
8110 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm17, %zmm8 {%k1}
8111 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm17 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
8112 ; AVX512DQ-BW-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8113 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm18
8114 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm17, %zmm18
8115 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
8116 ; AVX512DQ-BW-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
8117 ; AVX512DQ-BW-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
8118 ; AVX512DQ-BW-NEXT: movb $-32, %dil
8119 ; AVX512DQ-BW-NEXT: kmovd %edi, %k2
8120 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm18, %zmm8 {%k2}
8121 ; AVX512DQ-BW-NEXT: vpermi2d %zmm6, %zmm3, %zmm17
8122 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm19, %zmm17
8123 ; AVX512DQ-BW-NEXT: vpermi2d %zmm4, %zmm2, %zmm16
8124 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
8125 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm16, %zmm7 {%k1}
8126 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, %zmm7 {%k2}
8127 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
8128 ; AVX512DQ-BW-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8129 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm19
8130 ; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm18, %zmm19
8131 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm16 = [1,8,15,22,29,0,0,0]
8132 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, %zmm17
8133 ; AVX512DQ-BW-NEXT: vpermt2d %zmm11, %zmm16, %zmm17
8134 ; AVX512DQ-BW-NEXT: movw $480, %di # imm = 0x1E0
8135 ; AVX512DQ-BW-NEXT: kmovd %edi, %k2
8136 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm19, %zmm17 {%k2}
8137 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
8138 ; AVX512DQ-BW-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
8139 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm20
8140 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm19, %zmm20
8141 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
8142 ; AVX512DQ-BW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
8143 ; AVX512DQ-BW-NEXT: vpermt2d %zmm13, %zmm21, %zmm20
8144 ; AVX512DQ-BW-NEXT: movw $-512, %di # imm = 0xFE00
8145 ; AVX512DQ-BW-NEXT: kmovd %edi, %k1
8146 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
8147 ; AVX512DQ-BW-NEXT: vpermi2d %zmm6, %zmm3, %zmm19
8148 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm21, %zmm19
8149 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm20
8150 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm18, %zmm20
8151 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm16
8152 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm20, %zmm16 {%k2}
8153 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm19, %zmm16 {%k1}
8154 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
8155 ; AVX512DQ-BW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8156 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm22
8157 ; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm21, %zmm22
8158 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm19 = [18,25,0,7,14,0,0,0]
8159 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm11, %zmm20
8160 ; AVX512DQ-BW-NEXT: vpermt2d %zmm10, %zmm19, %zmm20
8161 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm22, %zmm20 {%k2}
8162 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
8163 ; AVX512DQ-BW-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
8164 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm23
8165 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm22, %zmm23
8166 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
8167 ; AVX512DQ-BW-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3]
8168 ; AVX512DQ-BW-NEXT: vpermt2d %zmm13, %zmm24, %zmm23
8169 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm23, %zmm20 {%k1}
8170 ; AVX512DQ-BW-NEXT: vpermi2d %zmm6, %zmm3, %zmm22
8171 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm24, %zmm22
8172 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm23
8173 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm21, %zmm23
8174 ; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
8175 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm23, %zmm19 {%k2}
8176 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm22, %zmm19 {%k1}
8177 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
8178 ; AVX512DQ-BW-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8179 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm25
8180 ; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm24, %zmm25
8181 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm22 = [19,26,1,8,15,0,0,0]
8182 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm11, %zmm23
8183 ; AVX512DQ-BW-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
8184 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm25, %zmm23 {%k2}
8185 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
8186 ; AVX512DQ-BW-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
8187 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, %zmm26
8188 ; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm25, %zmm26
8189 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
8190 ; AVX512DQ-BW-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
8191 ; AVX512DQ-BW-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
8192 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm26, %zmm23 {%k1}
8193 ; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm6, %zmm25
8194 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm27, %zmm25
8195 ; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm4, %zmm24
8196 ; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm22
8197 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm24, %zmm22 {%k2}
8198 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm25, %zmm22 {%k1}
8199 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
8200 ; AVX512DQ-BW-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
8201 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, %zmm25
8202 ; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm26, %zmm25
8203 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
8204 ; AVX512DQ-BW-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
8205 ; AVX512DQ-BW-NEXT: vpermt2d %zmm13, %zmm27, %zmm25
8206 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
8207 ; AVX512DQ-BW-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
8208 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm24
8209 ; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm28, %zmm24
8210 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm29 = [4,11,18,25]
8211 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, %zmm30
8212 ; AVX512DQ-BW-NEXT: vpermt2d %zmm11, %zmm29, %zmm30
8213 ; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm30, %zmm24, %zmm24
8214 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm25, %zmm24 {%k1}
8215 ; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm6, %zmm26
8216 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm27, %zmm26
8217 ; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
8218 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm29
8219 ; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm29, %zmm28, %zmm25
8220 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
8221 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm26
8222 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm18, %zmm26
8223 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
8224 ; AVX512DQ-BW-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
8225 ; AVX512DQ-BW-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
8226 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
8227 ; AVX512DQ-BW-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
8228 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm29
8229 ; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm28, %zmm29
8230 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm30 = [5,12,19,26]
8231 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, %zmm31
8232 ; AVX512DQ-BW-NEXT: vpermt2d %zmm11, %zmm30, %zmm31
8233 ; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm31, %zmm29, %zmm29
8234 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm26, %zmm29 {%k1}
8235 ; AVX512DQ-BW-NEXT: vpermi2d %zmm6, %zmm3, %zmm18
8236 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm27, %zmm18
8237 ; AVX512DQ-BW-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
8238 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm30
8239 ; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm30, %zmm28, %zmm26
8240 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm18, %zmm26 {%k1}
8241 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm21, %zmm9
8242 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
8243 ; AVX512DQ-BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
8244 ; AVX512DQ-BW-NEXT: vpermt2d %zmm13, %zmm15, %zmm9
8245 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
8246 ; AVX512DQ-BW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
8247 ; AVX512DQ-BW-NEXT: vpermt2d %zmm14, %zmm13, %zmm12
8248 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm14 = [6,13,20,27]
8249 ; AVX512DQ-BW-NEXT: vpermt2d %zmm11, %zmm14, %zmm10
8250 ; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm10, %zmm12, %zmm10
8251 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm9, %zmm10 {%k1}
8252 ; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm21, %zmm3
8253 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm15, %zmm3
8254 ; AVX512DQ-BW-NEXT: vpermt2d %zmm4, %zmm13, %zmm2
8255 ; AVX512DQ-BW-NEXT: vpermt2d %zmm1, %zmm14, %zmm0
8256 ; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm0, %zmm2, %zmm0
8257 ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm3, %zmm0 {%k1}
8258 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, 64(%rsi)
8259 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, (%rsi)
8260 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, 64(%rdx)
8261 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, (%rdx)
8262 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm19, 64(%rcx)
8263 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, (%rcx)
8264 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, 64(%r8)
8265 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm23, (%r8)
8266 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm25, 64(%r9)
8267 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm24, (%r9)
8268 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm26, 64(%r10)
8269 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm29, (%r10)
8270 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
8271 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, (%rax)
8272 ; AVX512DQ-BW-NEXT: vzeroupper
8273 ; AVX512DQ-BW-NEXT: retq
8275 ; AVX512DQ-BW-FCP-LABEL: load_i32_stride7_vf32:
8276 ; AVX512DQ-BW-FCP: # %bb.0:
8277 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
8278 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
8279 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 512(%rdi), %zmm1
8280 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 448(%rdi), %zmm0
8281 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 576(%rdi), %zmm4
8282 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 640(%rdi), %zmm2
8283 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 832(%rdi), %zmm5
8284 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 768(%rdi), %zmm6
8285 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 704(%rdi), %zmm3
8286 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm13
8287 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm15
8288 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm9
8289 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm10
8290 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm11
8291 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm14
8292 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm12
8293 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
8294 ; AVX512DQ-BW-FCP-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
8295 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm17
8296 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm14, %zmm16, %zmm17
8297 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,7,14,21,28,0,0,0]
8298 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm8
8299 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm11, %zmm7, %zmm8
8300 ; AVX512DQ-BW-FCP-NEXT: movw $992, %di # imm = 0x3E0
8301 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k1
8302 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm17, %zmm8 {%k1}
8303 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm17 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
8304 ; AVX512DQ-BW-FCP-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8305 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm18
8306 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm17, %zmm18
8307 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
8308 ; AVX512DQ-BW-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
8309 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
8310 ; AVX512DQ-BW-FCP-NEXT: movb $-32, %dil
8311 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k2
8312 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm18, %zmm8 {%k2}
8313 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm17
8314 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm19, %zmm17
8315 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm4, %zmm2, %zmm16
8316 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
8317 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm16, %zmm7 {%k1}
8318 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm7 {%k2}
8319 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
8320 ; AVX512DQ-BW-FCP-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8321 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm19
8322 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm18, %zmm19
8323 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm16 = [1,8,15,22,29,0,0,0]
8324 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm17
8325 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm11, %zmm16, %zmm17
8326 ; AVX512DQ-BW-FCP-NEXT: movw $480, %di # imm = 0x1E0
8327 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k2
8328 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm19, %zmm17 {%k2}
8329 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
8330 ; AVX512DQ-BW-FCP-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
8331 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm20
8332 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm19, %zmm20
8333 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
8334 ; AVX512DQ-BW-FCP-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
8335 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm13, %zmm21, %zmm20
8336 ; AVX512DQ-BW-FCP-NEXT: movw $-512, %di # imm = 0xFE00
8337 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k1
8338 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
8339 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm19
8340 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm21, %zmm19
8341 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm20
8342 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm18, %zmm20
8343 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm16
8344 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm20, %zmm16 {%k2}
8345 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm19, %zmm16 {%k1}
8346 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
8347 ; AVX512DQ-BW-FCP-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8348 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm22
8349 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm21, %zmm22
8350 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm19 = [18,25,0,7,14,0,0,0]
8351 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, %zmm20
8352 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm10, %zmm19, %zmm20
8353 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm22, %zmm20 {%k2}
8354 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
8355 ; AVX512DQ-BW-FCP-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
8356 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm23
8357 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm22, %zmm23
8358 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
8359 ; AVX512DQ-BW-FCP-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3]
8360 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm13, %zmm24, %zmm23
8361 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm23, %zmm20 {%k1}
8362 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm22
8363 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm24, %zmm22
8364 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm23
8365 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm21, %zmm23
8366 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
8367 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm23, %zmm19 {%k2}
8368 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm22, %zmm19 {%k1}
8369 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
8370 ; AVX512DQ-BW-FCP-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8371 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm25
8372 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm24, %zmm25
8373 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm22 = [19,26,1,8,15,0,0,0]
8374 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, %zmm23
8375 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
8376 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm23 {%k2}
8377 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
8378 ; AVX512DQ-BW-FCP-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
8379 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm15, %zmm26
8380 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm25, %zmm26
8381 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
8382 ; AVX512DQ-BW-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
8383 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
8384 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm26, %zmm23 {%k1}
8385 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm6, %zmm25
8386 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm25
8387 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm24
8388 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm22
8389 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm24, %zmm22 {%k2}
8390 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm22 {%k1}
8391 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
8392 ; AVX512DQ-BW-FCP-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
8393 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm15, %zmm25
8394 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm26, %zmm25
8395 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
8396 ; AVX512DQ-BW-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
8397 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm25
8398 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
8399 ; AVX512DQ-BW-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
8400 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm24
8401 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm28, %zmm24
8402 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm29 = [4,11,18,25]
8403 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm30
8404 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm11, %zmm29, %zmm30
8405 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm30, %zmm24, %zmm24
8406 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm24 {%k1}
8407 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm6, %zmm26
8408 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm26
8409 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
8410 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm29
8411 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm29, %zmm28, %zmm25
8412 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
8413 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm26
8414 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm18, %zmm26
8415 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
8416 ; AVX512DQ-BW-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3]
8417 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm13, %zmm27, %zmm26
8418 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
8419 ; AVX512DQ-BW-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
8420 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm29
8421 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm28, %zmm29
8422 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm30 = [5,12,19,26]
8423 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm31
8424 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm11, %zmm30, %zmm31
8425 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm31, %zmm29, %zmm29
8426 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm26, %zmm29 {%k1}
8427 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm6, %zmm3, %zmm18
8428 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm27, %zmm18
8429 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm2, %zmm4, %zmm28
8430 ; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm30
8431 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm30, %zmm28, %zmm26
8432 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm18, %zmm26 {%k1}
8433 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm21, %zmm9
8434 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
8435 ; AVX512DQ-BW-FCP-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
8436 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm13, %zmm15, %zmm9
8437 ; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
8438 ; AVX512DQ-BW-FCP-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
8439 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm14, %zmm13, %zmm12
8440 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm14 = [6,13,20,27]
8441 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm11, %zmm14, %zmm10
8442 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm10, %zmm12, %zmm10
8443 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm9, %zmm10 {%k1}
8444 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm21, %zmm3
8445 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm15, %zmm3
8446 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm4, %zmm13, %zmm2
8447 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm1, %zmm14, %zmm0
8448 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm0, %zmm2, %zmm0
8449 ; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm3, %zmm0 {%k1}
8450 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%rsi)
8451 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, (%rsi)
8452 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm16, 64(%rdx)
8453 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, (%rdx)
8454 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm19, 64(%rcx)
8455 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm20, (%rcx)
8456 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, 64(%r8)
8457 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm23, (%r8)
8458 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm25, 64(%r9)
8459 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm24, (%r9)
8460 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm26, 64(%r10)
8461 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm29, (%r10)
8462 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
8463 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, (%rax)
8464 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
8465 ; AVX512DQ-BW-FCP-NEXT: retq
8466 %wide.vec = load <224 x i32>, ptr %in.vec, align 64
8467 %strided.vec0 = shufflevector <224 x i32> %wide.vec, <224 x i32> poison, <32 x i32> <i32 0, i32 7, i32 14, i32 21, i32 28, i32 35, i32 42, i32 49, i32 56, i32 63, i32 70, i32 77, i32 84, i32 91, i32 98, i32 105, i32 112, i32 119, i32 126, i32 133, i32 140, i32 147, i32 154, i32 161, i32 168, i32 175, i32 182, i32 189, i32 196, i32 203, i32 210, i32 217>
8468 %strided.vec1 = shufflevector <224 x i32> %wide.vec, <224 x i32> poison, <32 x i32> <i32 1, i32 8, i32 15, i32 22, i32 29, i32 36, i32 43, i32 50, i32 57, i32 64, i32 71, i32 78, i32 85, i32 92, i32 99, i32 106, i32 113, i32 120, i32 127, i32 134, i32 141, i32 148, i32 155, i32 162, i32 169, i32 176, i32 183, i32 190, i32 197, i32 204, i32 211, i32 218>
8469 %strided.vec2 = shufflevector <224 x i32> %wide.vec, <224 x i32> poison, <32 x i32> <i32 2, i32 9, i32 16, i32 23, i32 30, i32 37, i32 44, i32 51, i32 58, i32 65, i32 72, i32 79, i32 86, i32 93, i32 100, i32 107, i32 114, i32 121, i32 128, i32 135, i32 142, i32 149, i32 156, i32 163, i32 170, i32 177, i32 184, i32 191, i32 198, i32 205, i32 212, i32 219>
8470 %strided.vec3 = shufflevector <224 x i32> %wide.vec, <224 x i32> poison, <32 x i32> <i32 3, i32 10, i32 17, i32 24, i32 31, i32 38, i32 45, i32 52, i32 59, i32 66, i32 73, i32 80, i32 87, i32 94, i32 101, i32 108, i32 115, i32 122, i32 129, i32 136, i32 143, i32 150, i32 157, i32 164, i32 171, i32 178, i32 185, i32 192, i32 199, i32 206, i32 213, i32 220>
8471 %strided.vec4 = shufflevector <224 x i32> %wide.vec, <224 x i32> poison, <32 x i32> <i32 4, i32 11, i32 18, i32 25, i32 32, i32 39, i32 46, i32 53, i32 60, i32 67, i32 74, i32 81, i32 88, i32 95, i32 102, i32 109, i32 116, i32 123, i32 130, i32 137, i32 144, i32 151, i32 158, i32 165, i32 172, i32 179, i32 186, i32 193, i32 200, i32 207, i32 214, i32 221>
8472 %strided.vec5 = shufflevector <224 x i32> %wide.vec, <224 x i32> poison, <32 x i32> <i32 5, i32 12, i32 19, i32 26, i32 33, i32 40, i32 47, i32 54, i32 61, i32 68, i32 75, i32 82, i32 89, i32 96, i32 103, i32 110, i32 117, i32 124, i32 131, i32 138, i32 145, i32 152, i32 159, i32 166, i32 173, i32 180, i32 187, i32 194, i32 201, i32 208, i32 215, i32 222>
8473 %strided.vec6 = shufflevector <224 x i32> %wide.vec, <224 x i32> poison, <32 x i32> <i32 6, i32 13, i32 20, i32 27, i32 34, i32 41, i32 48, i32 55, i32 62, i32 69, i32 76, i32 83, i32 90, i32 97, i32 104, i32 111, i32 118, i32 125, i32 132, i32 139, i32 146, i32 153, i32 160, i32 167, i32 174, i32 181, i32 188, i32 195, i32 202, i32 209, i32 216, i32 223>
8474 store <32 x i32> %strided.vec0, ptr %out.vec0, align 64
8475 store <32 x i32> %strided.vec1, ptr %out.vec1, align 64
8476 store <32 x i32> %strided.vec2, ptr %out.vec2, align 64
8477 store <32 x i32> %strided.vec3, ptr %out.vec3, align 64
8478 store <32 x i32> %strided.vec4, ptr %out.vec4, align 64
8479 store <32 x i32> %strided.vec5, ptr %out.vec5, align 64
8480 store <32 x i32> %strided.vec6, ptr %out.vec6, align 64
8481 ret void
8482 }
8484 define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6) nounwind {
8485 ; SSE-LABEL: load_i32_stride7_vf64:
8486 ; SSE: # %bb.0:
8487 ; SSE-NEXT: subq $2456, %rsp # imm = 0x998
8488 ; SSE-NEXT: movdqa 1088(%rdi), %xmm3
8489 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8490 ; SSE-NEXT: movdqa 1056(%rdi), %xmm4
8491 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8492 ; SSE-NEXT: movdqa 1008(%rdi), %xmm9
8493 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8494 ; SSE-NEXT: movdqa 1024(%rdi), %xmm5
8495 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8496 ; SSE-NEXT: movdqa 640(%rdi), %xmm13
8497 ; SSE-NEXT: movdqa 608(%rdi), %xmm6
8498 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8499 ; SSE-NEXT: movdqa 560(%rdi), %xmm10
8500 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8501 ; SSE-NEXT: movdqa 576(%rdi), %xmm7
8502 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8503 ; SSE-NEXT: movdqa 192(%rdi), %xmm2
8504 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8505 ; SSE-NEXT: movdqa 160(%rdi), %xmm15
8506 ; SSE-NEXT: movdqa 112(%rdi), %xmm1
8507 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8508 ; SSE-NEXT: movdqa 128(%rdi), %xmm0
8509 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8510 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
8511 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8512 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,2,3,3]
8513 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8514 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
8515 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
8516 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8517 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,3,3,3]
8518 ; SSE-NEXT: movdqa %xmm10, %xmm1
8519 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8520 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,2,3,3]
8521 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
8522 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8523 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
8524 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8525 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,3,3,3]
8526 ; SSE-NEXT: movdqa %xmm9, %xmm1
8527 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8528 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,3,3]
8529 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
8530 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
8531 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8532 ; SSE-NEXT: movdqa 1456(%rdi), %xmm1
8533 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8534 ; SSE-NEXT: movdqa 1472(%rdi), %xmm0
8535 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8536 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
8537 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8538 ; SSE-NEXT: movdqa 1536(%rdi), %xmm2
8539 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8540 ; SSE-NEXT: movdqa 1504(%rdi), %xmm0
8541 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8542 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
8543 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
8544 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
8545 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8546 ; SSE-NEXT: movdqa (%rdi), %xmm1
8547 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8548 ; SSE-NEXT: movdqa 16(%rdi), %xmm0
8549 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8550 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
8551 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8552 ; SSE-NEXT: movdqa 80(%rdi), %xmm2
8553 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8554 ; SSE-NEXT: movdqa 48(%rdi), %xmm0
8555 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8556 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
8557 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
8558 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
8559 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8560 ; SSE-NEXT: movdqa 448(%rdi), %xmm1
8561 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8562 ; SSE-NEXT: movdqa 464(%rdi), %xmm0
8563 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8564 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
8565 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8566 ; SSE-NEXT: movdqa 528(%rdi), %xmm2
8567 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8568 ; SSE-NEXT: movdqa 496(%rdi), %xmm0
8569 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8570 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
8571 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
8572 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
8573 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8574 ; SSE-NEXT: movdqa 896(%rdi), %xmm1
8575 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8576 ; SSE-NEXT: movdqa 912(%rdi), %xmm0
8577 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8578 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
8579 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8580 ; SSE-NEXT: movdqa 976(%rdi), %xmm2
8581 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8582 ; SSE-NEXT: movdqa 944(%rdi), %xmm0
8583 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8584 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
8585 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
8586 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
8587 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8588 ; SSE-NEXT: movdqa 1344(%rdi), %xmm1
8589 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8590 ; SSE-NEXT: movdqa 1360(%rdi), %xmm0
8591 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8592 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
8593 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8594 ; SSE-NEXT: movdqa 1424(%rdi), %xmm2
8595 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8596 ; SSE-NEXT: movdqa 1392(%rdi), %xmm0
8597 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8598 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
8599 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
8600 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
8601 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8602 ; SSE-NEXT: movdqa 336(%rdi), %xmm12
8603 ; SSE-NEXT: movdqa 352(%rdi), %xmm0
8604 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8605 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
8606 ; SSE-NEXT: movdqa %xmm12, %xmm5
8607 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8608 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
8609 ; SSE-NEXT: movdqa 416(%rdi), %xmm4
8610 ; SSE-NEXT: movdqa 384(%rdi), %xmm10
8611 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,2,3,3]
8612 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8613 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
8614 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8615 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
8616 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8617 ; SSE-NEXT: movdqa 784(%rdi), %xmm6
8618 ; SSE-NEXT: movdqa %xmm6, (%rsp) # 16-byte Spill
8619 ; SSE-NEXT: movdqa 800(%rdi), %xmm0
8620 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8621 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
8622 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8623 ; SSE-NEXT: movdqa 864(%rdi), %xmm0
8624 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8625 ; SSE-NEXT: movdqa 832(%rdi), %xmm1
8626 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8627 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,2,3,3]
8628 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
8629 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm6[0],xmm5[1]
8630 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8631 ; SSE-NEXT: movdqa 1232(%rdi), %xmm6
8632 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8633 ; SSE-NEXT: movdqa 1248(%rdi), %xmm0
8634 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8635 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
8636 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8637 ; SSE-NEXT: movdqa 1312(%rdi), %xmm0
8638 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8639 ; SSE-NEXT: movdqa 1280(%rdi), %xmm1
8640 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8641 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,2,3,3]
8642 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
8643 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm6[0],xmm5[1]
8644 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8645 ; SSE-NEXT: movdqa 1680(%rdi), %xmm6
8646 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8647 ; SSE-NEXT: movdqa 1696(%rdi), %xmm0
8648 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8649 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
8650 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8651 ; SSE-NEXT: movdqa 1760(%rdi), %xmm1
8652 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8653 ; SSE-NEXT: movdqa 1728(%rdi), %xmm0
8654 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8655 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,2,3,3]
8656 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
8657 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm6[0],xmm5[1]
8658 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8659 ; SSE-NEXT: movdqa 224(%rdi), %xmm8
8660 ; SSE-NEXT: movdqa 240(%rdi), %xmm0
8661 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8662 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
8663 ; SSE-NEXT: movdqa %xmm8, %xmm6
8664 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8665 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8666 ; SSE-NEXT: movdqa 304(%rdi), %xmm2
8667 ; SSE-NEXT: movdqa 272(%rdi), %xmm3
8668 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,2,3,3]
8669 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8670 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
8671 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8672 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm6[0],xmm5[1]
8673 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8674 ; SSE-NEXT: movdqa 672(%rdi), %xmm11
8675 ; SSE-NEXT: movdqa 688(%rdi), %xmm0
8676 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8677 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
8678 ; SSE-NEXT: movdqa %xmm11, %xmm7
8679 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
8680 ; SSE-NEXT: movdqa 752(%rdi), %xmm14
8681 ; SSE-NEXT: movdqa 720(%rdi), %xmm1
8682 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
8683 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8684 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
8685 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
8686 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8687 ; SSE-NEXT: movdqa 1120(%rdi), %xmm9
8688 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8689 ; SSE-NEXT: movdqa 1136(%rdi), %xmm0
8690 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8691 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[3,3,3,3]
8692 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
8693 ; SSE-NEXT: movdqa 1200(%rdi), %xmm5
8694 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8695 ; SSE-NEXT: movdqa 1168(%rdi), %xmm6
8696 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,2,3,3]
8697 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8698 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
8699 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm9[0],xmm0[1]
8700 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8701 ; SSE-NEXT: movdqa 1568(%rdi), %xmm9
8702 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8703 ; SSE-NEXT: movdqa 1584(%rdi), %xmm0
8704 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8705 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[3,3,3,3]
8706 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
8707 ; SSE-NEXT: movdqa 1648(%rdi), %xmm5
8708 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8709 ; SSE-NEXT: movdqa 1616(%rdi), %xmm0
8710 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8711 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
8712 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
8713 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm9[0],xmm0[1]
8714 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8715 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8716 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,2,2,2]
8717 ; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm7[2],xmm15[3],xmm7[3]
8718 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8719 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
8720 ; SSE-NEXT: movdqa 144(%rdi), %xmm0
8721 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8722 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
8723 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm7[0],xmm15[1]
8724 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8725 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
8726 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm15[2,2,2,2]
8727 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
8728 ; SSE-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
8729 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8730 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
8731 ; SSE-NEXT: movdqa 32(%rdi), %xmm5
8732 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8733 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
8734 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm7[0],xmm9[1]
8735 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8736 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,2,2,2]
8737 ; SSE-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm7[2],xmm10[3],xmm7[3]
8738 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm12[1,1,1,1]
8739 ; SSE-NEXT: movdqa 368(%rdi), %xmm4
8740 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8741 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
8742 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm7[0],xmm10[1]
8743 ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8744 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,2,2,2]
8745 ; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
8746 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[1,1,1,1]
8747 ; SSE-NEXT: movdqa 256(%rdi), %xmm2
8748 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8749 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
8750 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm7[0],xmm3[1]
8751 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8752 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm13[2,2,2,2]
8753 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8754 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm7[2],xmm4[3],xmm7[3]
8755 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8756 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
8757 ; SSE-NEXT: movdqa 592(%rdi), %xmm2
8758 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8759 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
8760 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm7[0],xmm4[1]
8761 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8762 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8763 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,2,2,2]
8764 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8765 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
8766 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
8767 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,1,1]
8768 ; SSE-NEXT: movdqa 480(%rdi), %xmm5
8769 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8770 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
8771 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8772 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8773 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8774 ; SSE-NEXT: # xmm7 = mem[2,2,2,2]
8775 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8776 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
8777 ; SSE-NEXT: movdqa (%rsp), %xmm13 # 16-byte Reload
8778 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm13[1,1,1,1]
8779 ; SSE-NEXT: movdqa 816(%rdi), %xmm5
8780 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8781 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
8782 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8783 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8784 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8785 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm14[2,2,2,2]
8786 ; SSE-NEXT: movdqa %xmm1, %xmm2
8787 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
8788 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm11[1,1,1,1]
8789 ; SSE-NEXT: movdqa %xmm11, %xmm12
8790 ; SSE-NEXT: movdqa 704(%rdi), %xmm1
8791 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8792 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
8793 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8794 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8795 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8796 ; SSE-NEXT: # xmm7 = mem[2,2,2,2]
8797 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8798 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
8799 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8800 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
8801 ; SSE-NEXT: movdqa 1040(%rdi), %xmm2
8802 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8803 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
8804 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm7[0],xmm1[1]
8805 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8806 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
8807 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,2,2,2]
8808 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8809 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
8810 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8811 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
8812 ; SSE-NEXT: movdqa 928(%rdi), %xmm2
8813 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8814 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
8815 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm7[0],xmm1[1]
8816 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8817 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8818 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,2,2,2]
8819 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8820 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
8821 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8822 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
8823 ; SSE-NEXT: movdqa 1264(%rdi), %xmm1
8824 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8825 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
8826 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8827 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8828 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8829 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,2,2,2]
8830 ; SSE-NEXT: movdqa %xmm6, %xmm2
8831 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
8832 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8833 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,1,1]
8834 ; SSE-NEXT: movdqa 1152(%rdi), %xmm6
8835 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8836 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
8837 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8838 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8839 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8840 ; SSE-NEXT: # xmm7 = mem[2,2,2,2]
8841 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8842 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
8843 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8844 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
8845 ; SSE-NEXT: movdqa 1488(%rdi), %xmm6
8846 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8847 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
8848 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8849 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8850 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
8851 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm10[2,2,2,2]
8852 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8853 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
8854 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8855 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
8856 ; SSE-NEXT: movdqa 1376(%rdi), %xmm6
8857 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8858 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
8859 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8860 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8861 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
8862 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm11[2,2,2,2]
8863 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8864 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
8865 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8866 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
8867 ; SSE-NEXT: movdqa 1712(%rdi), %xmm6
8868 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8869 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
8870 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8871 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8872 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
8873 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,2,2,2]
8874 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8875 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
8876 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8877 ; SSE-NEXT: # xmm7 = mem[1,1,1,1]
8878 ; SSE-NEXT: movdqa 1600(%rdi), %xmm9
8879 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8880 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8881 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8882 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8883 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
8884 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8885 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
8886 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8887 ; SSE-NEXT: movdqa 64(%rdi), %xmm0
8888 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8889 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm15[2,3,2,3]
8890 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
8891 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
8892 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
8893 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8894 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8895 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,2,3]
8896 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8897 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
8898 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8899 ; SSE-NEXT: movdqa 176(%rdi), %xmm0
8900 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8901 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8902 ; SSE-NEXT: # xmm9 = mem[2,3,2,3]
8903 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
8904 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
8905 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
8906 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8907 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8908 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
8909 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8910 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
8911 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8912 ; SSE-NEXT: movdqa 288(%rdi), %xmm0
8913 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8914 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8915 ; SSE-NEXT: # xmm9 = mem[2,3,2,3]
8916 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
8917 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
8918 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
8919 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8920 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8921 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
8922 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8923 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
8924 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8925 ; SSE-NEXT: movdqa 400(%rdi), %xmm0
8926 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8927 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8928 ; SSE-NEXT: # xmm9 = mem[2,3,2,3]
8929 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
8930 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
8931 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
8932 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8933 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[2,3,2,3]
8934 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8935 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
8936 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8937 ; SSE-NEXT: movdqa 512(%rdi), %xmm0
8938 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8939 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm4[2,3,2,3]
8940 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
8941 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
8942 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
8943 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8944 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8945 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
8946 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8947 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
8948 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8949 ; SSE-NEXT: movdqa 624(%rdi), %xmm0
8950 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8951 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8952 ; SSE-NEXT: # xmm9 = mem[2,3,2,3]
8953 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
8954 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
8955 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
8956 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8957 ; SSE-NEXT: movdqa %xmm12, %xmm15
8958 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm12[2,3,2,3]
8959 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8960 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
8961 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8962 ; SSE-NEXT: movdqa 736(%rdi), %xmm0
8963 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8964 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm14[2,3,2,3]
8965 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
8966 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
8967 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
8968 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8969 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm13[2,3,2,3]
8970 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
8971 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm12[1,1,1,1]
8972 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8973 ; SSE-NEXT: movdqa 848(%rdi), %xmm0
8974 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8975 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8976 ; SSE-NEXT: # xmm9 = mem[2,3,2,3]
8977 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
8978 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
8979 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
8980 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8981 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8982 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
8983 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
8984 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
8985 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8986 ; SSE-NEXT: movdqa 960(%rdi), %xmm0
8987 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8988 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm5[2,3,2,3]
8989 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
8990 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
8991 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
8992 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8993 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
8994 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
8995 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
8996 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm14[1,1,1,1]
8997 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
8998 ; SSE-NEXT: movdqa 1072(%rdi), %xmm0
8999 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9000 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
9001 ; SSE-NEXT: # xmm9 = mem[2,3,2,3]
9002 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9003 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
9004 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
9005 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9006 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,2,3]
9007 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9008 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm3[1,1,1,1]
9009 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
9010 ; SSE-NEXT: movdqa 1184(%rdi), %xmm0
9011 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9012 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm8[2,3,2,3]
9013 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9014 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
9015 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
9016 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9017 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9018 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,3,2,3]
9019 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
9020 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
9021 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
9022 ; SSE-NEXT: movdqa 1296(%rdi), %xmm0
9023 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9024 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
9025 ; SSE-NEXT: # xmm9 = mem[2,3,2,3]
9026 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9027 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
9028 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
9029 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9030 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
9031 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
9032 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
9033 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
9034 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
9035 ; SSE-NEXT: movdqa 1408(%rdi), %xmm0
9036 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9037 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm10[2,3,2,3]
9038 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9039 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
9040 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
9041 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9042 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
9043 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
9044 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
9045 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
9046 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
9047 ; SSE-NEXT: movdqa 1520(%rdi), %xmm0
9048 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9049 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
9050 ; SSE-NEXT: # xmm9 = mem[2,3,2,3]
9051 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9052 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
9053 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
9054 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9055 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
9056 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
9057 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
9058 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
9059 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
9060 ; SSE-NEXT: movdqa 1632(%rdi), %xmm0
9061 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9062 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm6[2,3,2,3]
9063 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9064 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
9065 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
9066 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9067 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
9068 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
9069 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
9070 ; SSE-NEXT: # xmm9 = mem[1,1,1,1]
9071 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
9072 ; SSE-NEXT: movdqa 1744(%rdi), %xmm0
9073 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9074 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm11[2,3,2,3]
9075 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9076 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
9077 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
9078 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9079 ; SSE-NEXT: movdqa 96(%rdi), %xmm0
9080 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9081 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,0,1,1]
9082 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9083 ; SSE-NEXT: movdqa %xmm5, %xmm0
9084 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
9085 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
9086 ; SSE-NEXT: # xmm4 = mem[2,2,3,3]
9087 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9088 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm7[2],xmm4[3],xmm7[3]
9089 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
9090 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9091 ; SSE-NEXT: movdqa 208(%rdi), %xmm0
9092 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9093 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,1,1]
9094 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9095 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
9096 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
9097 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
9098 ; SSE-NEXT: # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
9099 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
9100 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9101 ; SSE-NEXT: movdqa 320(%rdi), %xmm0
9102 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9103 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
9104 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
9105 ; SSE-NEXT: movdqa %xmm13, %xmm0
9106 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
9107 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
9108 ; SSE-NEXT: # xmm2 = mem[2,2,3,3]
9109 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
9110 ; SSE-NEXT: # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
9111 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
9112 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9113 ; SSE-NEXT: movdqa 432(%rdi), %xmm0
9114 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9115 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
9116 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
9117 ; SSE-NEXT: movdqa %xmm11, %xmm0
9118 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
9119 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9120 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
9121 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
9122 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
9123 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9124 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9125 ; SSE-NEXT: movdqa 544(%rdi), %xmm0
9126 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9127 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
9128 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9129 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
9130 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9131 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
9132 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9133 ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
9134 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9135 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9136 ; SSE-NEXT: movdqa 656(%rdi), %xmm0
9137 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9138 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
9139 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9140 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
9141 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9142 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
9143 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9144 ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
9145 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9146 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9147 ; SSE-NEXT: movdqa 768(%rdi), %xmm0
9148 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9149 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
9150 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9151 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
9152 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,2,3,3]
9153 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9154 ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
9155 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9156 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9157 ; SSE-NEXT: movdqa 880(%rdi), %xmm0
9158 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9159 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
9160 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
9161 ; SSE-NEXT: movdqa %xmm15, %xmm2
9162 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
9163 ; SSE-NEXT: pshufd $250, (%rsp), %xmm0 # 16-byte Folded Reload
9164 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9165 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3]
9166 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
9167 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9168 ; SSE-NEXT: movdqa 992(%rdi), %xmm0
9169 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9170 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9171 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9172 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9173 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9174 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9175 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9176 ; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
9177 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9178 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9179 ; SSE-NEXT: movdqa 1104(%rdi), %xmm0
9180 ; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
9181 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9182 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
9183 ; SSE-NEXT: movdqa %xmm12, %xmm1
9184 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9185 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9186 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9187 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
9188 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9189 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9190 ; SSE-NEXT: movdqa 1216(%rdi), %xmm0
9191 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9192 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9193 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9194 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9195 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9196 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9197 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
9198 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9199 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9200 ; SSE-NEXT: movdqa 1328(%rdi), %xmm0
9201 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9202 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9203 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
9204 ; SSE-NEXT: movdqa %xmm14, %xmm1
9205 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9206 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,2,3,3]
9207 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9208 ; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
9209 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9210 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9211 ; SSE-NEXT: movdqa 1440(%rdi), %xmm0
9212 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9213 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9214 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
9215 ; SSE-NEXT: movdqa %xmm9, %xmm1
9216 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9217 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9218 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9219 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9220 ; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
9221 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9222 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9223 ; SSE-NEXT: movdqa 1552(%rdi), %xmm0
9224 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9225 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9226 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9227 ; SSE-NEXT: movdqa %xmm6, %xmm1
9228 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9229 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9230 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9231 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9232 ; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
9233 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9234 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9235 ; SSE-NEXT: movdqa 1664(%rdi), %xmm0
9236 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9237 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9238 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9239 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9240 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9241 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9242 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
9243 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
9244 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9245 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9246 ; SSE-NEXT: movdqa 1776(%rdi), %xmm0
9247 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9248 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
9249 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
9250 ; SSE-NEXT: movdqa %xmm4, %xmm1
9251 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9252 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9253 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9254 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9255 ; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
9256 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9257 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9258 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,3,3,3]
9259 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9260 ; SSE-NEXT: movdqa %xmm7, %xmm1
9261 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9262 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
9263 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9264 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9265 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9266 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9267 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9268 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9269 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9270 ; SSE-NEXT: movdqa %xmm5, %xmm1
9271 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9272 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9273 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9274 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9275 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9276 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9277 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9278 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9279 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9280 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9281 ; SSE-NEXT: movdqa %xmm8, %xmm1
9282 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9283 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
9284 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9285 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9286 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9287 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9288 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[3,3,3,3]
9289 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
9290 ; SSE-NEXT: movdqa %xmm10, %xmm1
9291 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9292 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,3,3]
9293 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9294 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
9295 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9296 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9297 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9298 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9299 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
9300 ; SSE-NEXT: movdqa %xmm11, %xmm1
9301 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9302 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9303 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9304 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9305 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9306 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9307 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9308 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9309 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9310 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
9311 ; SSE-NEXT: movdqa %xmm13, %xmm1
9312 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9313 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9314 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9315 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9316 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9317 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9318 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9319 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9320 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9321 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9322 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9323 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9324 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9325 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9326 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9327 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9328 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9329 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9330 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9331 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9332 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9333 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,2,3,3]
9334 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9335 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9336 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9337 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9338 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9339 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9340 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9341 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9342 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
9343 ; SSE-NEXT: # xmm15 = mem[2,2,3,3]
9344 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
9345 ; SSE-NEXT: # xmm15 = xmm15[0],mem[0],xmm15[1],mem[1]
9346 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm1[0],xmm15[1]
9347 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9348 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9349 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9350 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9351 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9352 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,3,3]
9353 ; SSE-NEXT: movdqa (%rsp), %xmm15 # 16-byte Reload
9354 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
9355 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9356 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9357 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9358 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9359 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9360 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9361 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9362 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9363 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
9364 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1]
9365 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9366 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9367 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9368 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9369 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9370 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9371 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
9372 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9373 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9374 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9375 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9376 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9377 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9378 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9379 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9380 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,3,3]
9381 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
9382 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
9383 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9384 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9385 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9386 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
9387 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9388 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9389 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,2,3,3]
9390 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
9391 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
9392 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9393 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9394 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,3,3,3]
9395 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9396 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9397 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9398 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
9399 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9400 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9401 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9402 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9403 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
9404 ; SSE-NEXT: # xmm2 = mem[3,3,3,3]
9405 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9406 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
9407 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,3,3]
9408 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9409 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9410 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
9411 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9412 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9413 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
9414 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9415 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9416 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
9417 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9418 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9419 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9420 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9421 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9422 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
9423 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9424 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9425 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
9426 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9427 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9428 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9429 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9430 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9431 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
9432 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9433 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9434 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
9435 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9436 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9437 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9438 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9439 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,2,2]
9440 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9441 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9442 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
9443 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9444 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9445 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9446 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9447 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
9448 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,2,2,2]
9449 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9450 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9451 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,1,1,1]
9452 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9453 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
9454 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9455 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9456 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
9457 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,2,2]
9458 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9459 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9460 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
9461 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9462 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
9463 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9464 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9465 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9466 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
9467 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9468 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9469 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9470 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9471 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9472 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9473 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9474 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9475 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9476 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
9477 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9478 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9479 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9480 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9481 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9482 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
9483 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9484 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9485 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9486 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
9487 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9488 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9489 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9490 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9491 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9492 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9493 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9494 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9495 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,2,2,2]
9496 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9497 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9498 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9499 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9500 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9501 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
9502 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9503 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9504 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,2,2]
9505 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9506 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9507 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9508 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9509 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9510 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9511 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9512 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9513 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9514 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
9515 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9516 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9517 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9518 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9519 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9520 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9521 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9522 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9523 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,2,2]
9524 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9525 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9526 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9527 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9528 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9529 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9530 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9531 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9532 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,2,2]
9533 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9534 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
9535 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9536 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9537 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9538 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
9539 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
9540 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9541 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9542 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
9543 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
9544 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
9545 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9546 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9547 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9548 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
9549 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
9550 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9551 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9552 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
9553 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
9554 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9555 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9556 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
9557 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
9558 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
9559 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9560 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9561 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9562 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9563 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9564 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9565 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9566 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9567 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
9568 ; SSE-NEXT: # xmm4 = mem[0,0,1,1]
9569 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
9570 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
9571 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9572 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9573 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9574 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9575 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9576 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9577 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9578 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9579 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
9580 ; SSE-NEXT: # xmm4 = mem[0,0,1,1]
9581 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
9582 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
9583 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9584 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9585 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9586 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9587 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9588 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9589 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9590 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9591 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
9592 ; SSE-NEXT: # xmm15 = mem[0,0,1,1]
9593 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
9594 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm1[0],xmm15[1]
9595 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9596 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9597 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9598 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9599 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9600 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9601 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9602 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
9603 ; SSE-NEXT: # xmm14 = mem[0,0,1,1]
9604 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
9605 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm1[0],xmm14[1]
9606 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
9607 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9608 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9609 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9610 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,2,3]
9611 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
9612 ; SSE-NEXT: # xmm13 = mem[0,0,1,1]
9613 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
9614 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm1[0],xmm13[1]
9615 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
9616 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9617 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9618 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9619 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,3,2,3]
9620 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
9621 ; SSE-NEXT: # xmm12 = mem[0,0,1,1]
9622 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
9623 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm1[0],xmm12[1]
9624 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9625 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9626 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9627 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9628 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9629 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9630 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9631 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
9632 ; SSE-NEXT: # xmm11 = mem[0,0,1,1]
9633 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
9634 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm1[0],xmm11[1]
9635 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
9636 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9637 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9638 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9639 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9640 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9641 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
9642 ; SSE-NEXT: # xmm10 = mem[0,0,1,1]
9643 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
9644 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm1[0],xmm10[1]
9645 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9646 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9647 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9648 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9649 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9650 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9651 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9652 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
9653 ; SSE-NEXT: # xmm9 = mem[0,0,1,1]
9654 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
9655 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm1[0],xmm9[1]
9656 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
9657 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9658 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9659 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9660 ; SSE-NEXT: pshufd $238, (%rsp), %xmm0 # 16-byte Folded Reload
9661 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9662 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
9663 ; SSE-NEXT: # xmm8 = mem[0,0,1,1]
9664 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
9665 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
9666 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9667 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9668 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9669 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9670 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9671 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9672 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9673 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
9674 ; SSE-NEXT: # xmm7 = mem[0,0,1,1]
9675 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
9676 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1]
9677 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9678 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9679 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9680 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9681 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9682 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9683 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9684 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
9685 ; SSE-NEXT: # xmm6 = mem[0,0,1,1]
9686 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
9687 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm1[0],xmm6[1]
9688 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9689 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
9690 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9691 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9692 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9693 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9694 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9695 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
9696 ; SSE-NEXT: # xmm5 = mem[0,0,1,1]
9697 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
9698 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1]
9699 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
9700 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9701 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9702 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9703 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9704 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9705 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
9706 ; SSE-NEXT: # xmm4 = mem[0,0,1,1]
9707 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
9708 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
9709 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9710 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
9711 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9712 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9713 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
9714 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9715 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9716 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
9717 ; SSE-NEXT: # xmm3 = mem[0,0,1,1]
9718 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
9719 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
9720 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
9721 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
9722 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
9723 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9724 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9725 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
9726 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
9727 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
9728 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
9729 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
9730 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9731 ; SSE-NEXT: movaps %xmm0, 224(%rsi)
9732 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9733 ; SSE-NEXT: movaps %xmm0, 160(%rsi)
9734 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9735 ; SSE-NEXT: movaps %xmm0, 96(%rsi)
9736 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9737 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
9738 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9739 ; SSE-NEXT: movaps %xmm0, 240(%rsi)
9740 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9741 ; SSE-NEXT: movaps %xmm0, 176(%rsi)
9742 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9743 ; SSE-NEXT: movaps %xmm0, 112(%rsi)
9744 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9745 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
9746 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9747 ; SSE-NEXT: movaps %xmm0, 192(%rsi)
9748 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9749 ; SSE-NEXT: movaps %xmm0, 128(%rsi)
9750 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9751 ; SSE-NEXT: movaps %xmm0, 64(%rsi)
9752 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9753 ; SSE-NEXT: movaps %xmm0, (%rsi)
9754 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9755 ; SSE-NEXT: movaps %xmm0, 208(%rsi)
9756 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9757 ; SSE-NEXT: movaps %xmm0, 144(%rsi)
9758 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9759 ; SSE-NEXT: movaps %xmm0, 80(%rsi)
9760 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9761 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
9762 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9763 ; SSE-NEXT: movaps %xmm0, 224(%rdx)
9764 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9765 ; SSE-NEXT: movaps %xmm0, 240(%rdx)
9766 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9767 ; SSE-NEXT: movaps %xmm0, 192(%rdx)
9768 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9769 ; SSE-NEXT: movaps %xmm0, 208(%rdx)
9770 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9771 ; SSE-NEXT: movaps %xmm0, 160(%rdx)
9772 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9773 ; SSE-NEXT: movaps %xmm0, 176(%rdx)
9774 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9775 ; SSE-NEXT: movaps %xmm0, 128(%rdx)
9776 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9777 ; SSE-NEXT: movaps %xmm0, 144(%rdx)
9778 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9779 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
9780 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9781 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
9782 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9783 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
9784 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9785 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
9786 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9787 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
9788 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9789 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
9790 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9791 ; SSE-NEXT: movaps %xmm0, (%rdx)
9792 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9793 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
9794 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9795 ; SSE-NEXT: movaps %xmm0, 240(%rcx)
9796 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9797 ; SSE-NEXT: movaps %xmm0, 224(%rcx)
9798 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9799 ; SSE-NEXT: movaps %xmm0, 208(%rcx)
9800 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9801 ; SSE-NEXT: movaps %xmm0, 192(%rcx)
9802 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9803 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
9804 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9805 ; SSE-NEXT: movaps %xmm0, 160(%rcx)
9806 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9807 ; SSE-NEXT: movaps %xmm0, 144(%rcx)
9808 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9809 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
9810 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9811 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
9812 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9813 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
9814 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9815 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
9816 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9817 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
9818 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9819 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
9820 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9821 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
9822 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9823 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
9824 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9825 ; SSE-NEXT: movaps %xmm0, (%rcx)
9826 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9827 ; SSE-NEXT: movaps %xmm0, 240(%r8)
9828 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9829 ; SSE-NEXT: movaps %xmm0, 224(%r8)
9830 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9831 ; SSE-NEXT: movaps %xmm0, 208(%r8)
9832 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9833 ; SSE-NEXT: movaps %xmm0, 192(%r8)
9834 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9835 ; SSE-NEXT: movaps %xmm0, 176(%r8)
9836 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9837 ; SSE-NEXT: movaps %xmm0, 160(%r8)
9838 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9839 ; SSE-NEXT: movaps %xmm0, 144(%r8)
9840 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9841 ; SSE-NEXT: movaps %xmm0, 128(%r8)
9842 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9843 ; SSE-NEXT: movaps %xmm0, 112(%r8)
9844 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9845 ; SSE-NEXT: movaps %xmm0, 96(%r8)
9846 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9847 ; SSE-NEXT: movaps %xmm0, 80(%r8)
9848 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9849 ; SSE-NEXT: movaps %xmm0, 64(%r8)
9850 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9851 ; SSE-NEXT: movaps %xmm0, 48(%r8)
9852 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9853 ; SSE-NEXT: movaps %xmm0, 32(%r8)
9854 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9855 ; SSE-NEXT: movaps %xmm0, 16(%r8)
9856 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9857 ; SSE-NEXT: movaps %xmm0, (%r8)
9858 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9859 ; SSE-NEXT: movaps %xmm0, 240(%r9)
9860 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9861 ; SSE-NEXT: movaps %xmm0, 224(%r9)
9862 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9863 ; SSE-NEXT: movaps %xmm0, 208(%r9)
9864 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9865 ; SSE-NEXT: movaps %xmm0, 192(%r9)
9866 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9867 ; SSE-NEXT: movaps %xmm0, 176(%r9)
9868 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9869 ; SSE-NEXT: movaps %xmm0, 160(%r9)
9870 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9871 ; SSE-NEXT: movaps %xmm0, 144(%r9)
9872 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9873 ; SSE-NEXT: movaps %xmm0, 128(%r9)
9874 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9875 ; SSE-NEXT: movaps %xmm0, 112(%r9)
9876 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9877 ; SSE-NEXT: movaps %xmm0, 96(%r9)
9878 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9879 ; SSE-NEXT: movaps %xmm0, 80(%r9)
9880 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9881 ; SSE-NEXT: movaps %xmm0, 64(%r9)
9882 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9883 ; SSE-NEXT: movaps %xmm0, 48(%r9)
9884 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9885 ; SSE-NEXT: movaps %xmm0, 32(%r9)
9886 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9887 ; SSE-NEXT: movaps %xmm0, 16(%r9)
9888 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9889 ; SSE-NEXT: movaps %xmm0, (%r9)
9890 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
9891 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9892 ; SSE-NEXT: movaps %xmm0, 240(%rax)
9893 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9894 ; SSE-NEXT: movaps %xmm0, 224(%rax)
9895 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9896 ; SSE-NEXT: movaps %xmm0, 208(%rax)
9897 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9898 ; SSE-NEXT: movaps %xmm0, 192(%rax)
9899 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9900 ; SSE-NEXT: movaps %xmm0, 176(%rax)
9901 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9902 ; SSE-NEXT: movaps %xmm0, 160(%rax)
9903 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9904 ; SSE-NEXT: movaps %xmm0, 144(%rax)
9905 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9906 ; SSE-NEXT: movaps %xmm0, 128(%rax)
9907 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9908 ; SSE-NEXT: movaps %xmm0, 112(%rax)
9909 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9910 ; SSE-NEXT: movaps %xmm0, 96(%rax)
9911 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9912 ; SSE-NEXT: movaps %xmm0, 80(%rax)
9913 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9914 ; SSE-NEXT: movaps %xmm0, 64(%rax)
9915 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9916 ; SSE-NEXT: movaps %xmm0, 48(%rax)
9917 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9918 ; SSE-NEXT: movaps %xmm0, 32(%rax)
9919 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9920 ; SSE-NEXT: movaps %xmm0, 16(%rax)
9921 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9922 ; SSE-NEXT: movaps %xmm0, (%rax)
9923 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
9924 ; SSE-NEXT: movapd %xmm2, 240(%rax)
9925 ; SSE-NEXT: movapd %xmm3, 224(%rax)
9926 ; SSE-NEXT: movapd %xmm4, 208(%rax)
9927 ; SSE-NEXT: movapd %xmm5, 192(%rax)
9928 ; SSE-NEXT: movapd %xmm6, 176(%rax)
9929 ; SSE-NEXT: movapd %xmm7, 160(%rax)
9930 ; SSE-NEXT: movapd %xmm8, 144(%rax)
9931 ; SSE-NEXT: movapd %xmm9, 128(%rax)
9932 ; SSE-NEXT: movapd %xmm10, 112(%rax)
9933 ; SSE-NEXT: movapd %xmm11, 96(%rax)
9934 ; SSE-NEXT: movapd %xmm12, 80(%rax)
9935 ; SSE-NEXT: movapd %xmm13, 64(%rax)
9936 ; SSE-NEXT: movapd %xmm14, 48(%rax)
9937 ; SSE-NEXT: movapd %xmm15, 32(%rax)
9938 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9939 ; SSE-NEXT: movaps %xmm0, 16(%rax)
9940 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9941 ; SSE-NEXT: movaps %xmm0, (%rax)
9942 ; SSE-NEXT: addq $2456, %rsp # imm = 0x998
9943 ; SSE-NEXT: retq
9944 ;
9945 ; AVX-LABEL: load_i32_stride7_vf64:
9946 ; AVX: # %bb.0:
9947 ; AVX-NEXT: subq $3176, %rsp # imm = 0xC68
9948 ; AVX-NEXT: vmovaps 704(%rdi), %ymm2
9949 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9950 ; AVX-NEXT: vmovaps 672(%rdi), %ymm3
9951 ; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9952 ; AVX-NEXT: vmovaps 768(%rdi), %ymm5
9953 ; AVX-NEXT: vmovaps 256(%rdi), %ymm4
9954 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9955 ; AVX-NEXT: vmovaps 224(%rdi), %ymm1
9956 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9957 ; AVX-NEXT: vmovaps 320(%rdi), %ymm7
9958 ; AVX-NEXT: vmovaps 304(%rdi), %xmm0
9959 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9960 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[2],ymm0[2]
9961 ; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9962 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6],ymm1[7]
9963 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
9964 ; AVX-NEXT: vmovaps 224(%rdi), %xmm13
9965 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1],xmm1[2,3]
9966 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
9967 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9968 ; AVX-NEXT: vmovaps 384(%rdi), %xmm4
9969 ; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9970 ; AVX-NEXT: vmovaps 352(%rdi), %xmm1
9971 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9972 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
9973 ; AVX-NEXT: vmovaps 416(%rdi), %xmm4
9974 ; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9975 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm4[1]
9976 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9977 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
9978 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9979 ; AVX-NEXT: vmovaps 752(%rdi), %xmm0
9980 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9981 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[2],ymm0[2]
9982 ; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9983 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm2[6],ymm3[7]
9984 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
9985 ; AVX-NEXT: vmovaps 672(%rdi), %xmm3
9986 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
9987 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
9988 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9989 ; AVX-NEXT: vmovaps 832(%rdi), %xmm2
9990 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9991 ; AVX-NEXT: vmovaps 800(%rdi), %xmm1
9992 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9993 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
9994 ; AVX-NEXT: vmovaps 864(%rdi), %xmm2
9995 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9996 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
9997 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9998 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
9999 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10000 ; AVX-NEXT: vmovaps 1152(%rdi), %ymm1
10001 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10002 ; AVX-NEXT: vmovaps 1120(%rdi), %ymm0
10003 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10004 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
10005 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
10006 ; AVX-NEXT: vmovaps 1120(%rdi), %xmm1
10007 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10008 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
10009 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
10010 ; AVX-NEXT: vmovaps 1216(%rdi), %ymm9
10011 ; AVX-NEXT: vmovaps 1200(%rdi), %xmm1
10012 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10013 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm9[0],ymm1[0],ymm9[2],ymm1[2]
10014 ; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10015 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
10016 ; AVX-NEXT: vmovaps 1280(%rdi), %xmm1
10017 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10018 ; AVX-NEXT: vmovaps 1248(%rdi), %xmm2
10019 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10020 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
10021 ; AVX-NEXT: vmovaps 1312(%rdi), %xmm2
10022 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10023 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
10024 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10025 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10026 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10027 ; AVX-NEXT: vmovaps 1600(%rdi), %ymm1
10028 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10029 ; AVX-NEXT: vmovaps 1568(%rdi), %ymm0
10030 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10031 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
10032 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
10033 ; AVX-NEXT: vmovaps 1568(%rdi), %xmm1
10034 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10035 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
10036 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
10037 ; AVX-NEXT: vmovaps 1664(%rdi), %ymm2
10038 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10039 ; AVX-NEXT: vmovaps 1648(%rdi), %xmm1
10040 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10041 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
10042 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
10043 ; AVX-NEXT: vmovaps 1728(%rdi), %xmm1
10044 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10045 ; AVX-NEXT: vmovaps 1696(%rdi), %xmm2
10046 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10047 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
10048 ; AVX-NEXT: vmovaps 1760(%rdi), %xmm2
10049 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10050 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
10051 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10052 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10053 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10054 ; AVX-NEXT: vmovaps 32(%rdi), %ymm0
10055 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10056 ; AVX-NEXT: vmovaps (%rdi), %ymm1
10057 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10058 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
10059 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
10060 ; AVX-NEXT: vmovaps (%rdi), %xmm1
10061 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10062 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
10063 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
10064 ; AVX-NEXT: vmovaps 96(%rdi), %ymm15
10065 ; AVX-NEXT: vmovaps 80(%rdi), %xmm1
10066 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10067 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm15[0],ymm1[0],ymm15[2],ymm1[2]
10068 ; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10069 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
10070 ; AVX-NEXT: vmovaps 160(%rdi), %xmm2
10071 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10072 ; AVX-NEXT: vmovaps 128(%rdi), %xmm1
10073 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10074 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
10075 ; AVX-NEXT: vmovaps 192(%rdi), %xmm2
10076 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10077 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
10078 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10079 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10080 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10081 ; AVX-NEXT: vmovaps 480(%rdi), %ymm1
10082 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10083 ; AVX-NEXT: vmovaps 448(%rdi), %ymm0
10084 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10085 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
10086 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
10087 ; AVX-NEXT: vmovaps 448(%rdi), %xmm1
10088 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10089 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
10090 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
10091 ; AVX-NEXT: vmovaps 544(%rdi), %ymm2
10092 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10093 ; AVX-NEXT: vmovaps 528(%rdi), %xmm1
10094 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10095 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
10096 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
10097 ; AVX-NEXT: vmovaps 608(%rdi), %xmm1
10098 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10099 ; AVX-NEXT: vmovaps 576(%rdi), %xmm2
10100 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10101 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
10102 ; AVX-NEXT: vmovaps 640(%rdi), %xmm2
10103 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10104 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
10105 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10106 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10107 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10108 ; AVX-NEXT: vmovaps 928(%rdi), %ymm1
10109 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10110 ; AVX-NEXT: vmovaps 896(%rdi), %ymm0
10111 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10112 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
10113 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
10114 ; AVX-NEXT: vmovaps 896(%rdi), %xmm12
10115 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm12[0,1],xmm0[2,3]
10116 ; AVX-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10117 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
10118 ; AVX-NEXT: vmovaps 992(%rdi), %ymm4
10119 ; AVX-NEXT: vmovaps 976(%rdi), %xmm1
10120 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10121 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
10122 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10123 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
10124 ; AVX-NEXT: vmovaps 1056(%rdi), %xmm1
10125 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10126 ; AVX-NEXT: vmovaps 1024(%rdi), %xmm2
10127 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10128 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
10129 ; AVX-NEXT: vmovaps 1088(%rdi), %xmm10
10130 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm10[1]
10131 ; AVX-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10132 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10133 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10134 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10135 ; AVX-NEXT: vmovaps 1376(%rdi), %ymm1
10136 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10137 ; AVX-NEXT: vmovaps 1344(%rdi), %ymm0
10138 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10139 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
10140 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
10141 ; AVX-NEXT: vmovaps 1344(%rdi), %xmm1
10142 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10143 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
10144 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
10145 ; AVX-NEXT: vmovaps 1440(%rdi), %ymm8
10146 ; AVX-NEXT: vmovaps 1424(%rdi), %xmm1
10147 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10148 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
10149 ; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10150 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
10151 ; AVX-NEXT: vmovaps 1504(%rdi), %xmm2
10152 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10153 ; AVX-NEXT: vmovaps 1472(%rdi), %xmm1
10154 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10155 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
10156 ; AVX-NEXT: vmovaps 1536(%rdi), %xmm14
10157 ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm14[1]
10158 ; AVX-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10159 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10160 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10161 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10162 ; AVX-NEXT: vmovaps 288(%rdi), %ymm0
10163 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10164 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1],ymm0[2,2],ymm7[5,5],ymm0[6,6]
10165 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
10166 ; AVX-NEXT: vmovaps 256(%rdi), %xmm1
10167 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10168 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm13[1],xmm1[2,3]
10169 ; AVX-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10170 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
10171 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
10172 ; AVX-NEXT: vmovaps 384(%rdi), %ymm1
10173 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10174 ; AVX-NEXT: vmovaps 352(%rdi), %ymm2
10175 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10176 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
10177 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,0],ymm1[3,3],ymm2[4,4],ymm1[7,7]
10178 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
10179 ; AVX-NEXT: vinsertps $49, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
10180 ; AVX-NEXT: # xmm1 = zero,xmm1[1,2],mem[0]
10181 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10182 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10183 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10184 ; AVX-NEXT: vmovaps 736(%rdi), %ymm0
10185 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10186 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm5[1,1],ymm0[2,2],ymm5[5,5],ymm0[6,6]
10187 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
10188 ; AVX-NEXT: vmovaps 704(%rdi), %xmm1
10189 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10190 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
10191 ; AVX-NEXT: vmovaps %xmm3, %xmm5
10192 ; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10193 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
10194 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
10195 ; AVX-NEXT: vmovaps 832(%rdi), %ymm1
10196 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10197 ; AVX-NEXT: vmovaps 800(%rdi), %ymm3
10198 ; AVX-NEXT: vmovups %ymm3, (%rsp) # 32-byte Spill
10199 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[0,1]
10200 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,0],ymm1[3,3],ymm3[4,4],ymm1[7,7]
10201 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
10202 ; AVX-NEXT: vinsertps $49, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
10203 ; AVX-NEXT: # xmm1 = zero,xmm1[1,2],mem[0]
10204 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10205 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10206 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10207 ; AVX-NEXT: vmovaps 1184(%rdi), %ymm0
10208 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10209 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm9[1,1],ymm0[2,2],ymm9[5,5],ymm0[6,6]
10210 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
10211 ; AVX-NEXT: vmovaps 1152(%rdi), %xmm11
10212 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10213 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm11[0],xmm9[1],xmm11[2,3]
10214 ; AVX-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10215 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
10216 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
10217 ; AVX-NEXT: vmovaps 1280(%rdi), %ymm1
10218 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10219 ; AVX-NEXT: vmovaps 1248(%rdi), %ymm2
10220 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10221 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
10222 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,0],ymm1[3,3],ymm2[4,4],ymm1[7,7]
10223 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
10224 ; AVX-NEXT: vinsertps $49, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
10225 ; AVX-NEXT: # xmm1 = zero,xmm1[1,2],mem[0]
10226 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10227 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10228 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10229 ; AVX-NEXT: vmovaps 1632(%rdi), %ymm0
10230 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10231 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10232 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,1],ymm0[2,2],ymm1[5,5],ymm0[6,6]
10233 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
10234 ; AVX-NEXT: vmovaps 1600(%rdi), %xmm1
10235 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10236 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
10237 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3]
10238 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
10239 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
10240 ; AVX-NEXT: vmovaps 1728(%rdi), %ymm1
10241 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10242 ; AVX-NEXT: vmovaps 1696(%rdi), %ymm2
10243 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10244 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
10245 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,0],ymm1[3,3],ymm2[4,4],ymm1[7,7]
10246 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
10247 ; AVX-NEXT: vinsertps $49, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
10248 ; AVX-NEXT: # xmm1 = zero,xmm1[1,2],mem[0]
10249 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10250 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
10251 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10252 ; AVX-NEXT: vmovaps 64(%rdi), %ymm0
10253 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10254 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,1],ymm0[2,2],ymm15[5,5],ymm0[6,6]
10255 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
10256 ; AVX-NEXT: vmovaps 32(%rdi), %xmm1
10257 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10258 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
10259 ; AVX-NEXT: # xmm1 = xmm1[0],mem[1],xmm1[2,3]
10260 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
10261 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6,7]
10262 ; AVX-NEXT: vmovaps 160(%rdi), %ymm2
10263 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10264 ; AVX-NEXT: vmovaps 128(%rdi), %ymm0
10265 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10266 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm2[0,1]
10267 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,0],ymm2[3,3],ymm0[4,4],ymm2[7,7]
10268 ; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
10269 ; AVX-NEXT: vinsertps $49, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
10270 ; AVX-NEXT: # xmm2 = zero,xmm2[1,2],mem[0]
10271 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
10272 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
10273 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10274 ; AVX-NEXT: vmovaps 512(%rdi), %ymm0
10275 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10276 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
10277 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm6[1,1],ymm0[2,2],ymm6[5,5],ymm0[6,6]
10278 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
10279 ; AVX-NEXT: vmovaps 480(%rdi), %xmm0
10280 ; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10281 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
10282 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0],xmm15[1],xmm0[2,3]
10283 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],mem[3,3]
10284 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm1[3,4,5,6,7]
10285 ; AVX-NEXT: vmovaps 608(%rdi), %ymm0
10286 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10287 ; AVX-NEXT: vmovaps 576(%rdi), %ymm1
10288 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10289 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm0[0,1]
10290 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm1[0,0],ymm3[3,3],ymm1[4,4],ymm3[7,7]
10291 ; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
10292 ; AVX-NEXT: vinsertps $49, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
10293 ; AVX-NEXT: # xmm3 = zero,xmm3[1,2],mem[0]
10294 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
10295 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
10296 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10297 ; AVX-NEXT: vmovaps 960(%rdi), %ymm0
10298 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10299 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[1,1],ymm0[2,2],ymm4[5,5],ymm0[6,6]
10300 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
10301 ; AVX-NEXT: vmovaps 928(%rdi), %xmm0
10302 ; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10303 ; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm0[0],xmm12[1],xmm0[2,3]
10304 ; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,0],mem[3,3]
10305 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7]
10306 ; AVX-NEXT: vmovaps 1056(%rdi), %ymm0
10307 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10308 ; AVX-NEXT: vmovaps 1024(%rdi), %ymm1
10309 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10310 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],ymm0[0,1]
10311 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm1[0,0],ymm4[3,3],ymm1[4,4],ymm4[7,7]
10312 ; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
10313 ; AVX-NEXT: vinsertps {{.*#+}} xmm4 = zero,xmm4[1,2],xmm10[2]
10314 ; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
10315 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
10316 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10317 ; AVX-NEXT: vmovaps 1408(%rdi), %ymm0
10318 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10319 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[1,1],ymm0[2,2],ymm8[5,5],ymm0[6,6]
10320 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
10321 ; AVX-NEXT: vmovaps 1376(%rdi), %xmm0
10322 ; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10323 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
10324 ; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm0[0],xmm3[1],xmm0[2,3]
10325 ; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[1,0],mem[3,3]
10326 ; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm4[3,4,5,6,7]
10327 ; AVX-NEXT: vmovaps 1504(%rdi), %ymm0
10328 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10329 ; AVX-NEXT: vmovaps 1472(%rdi), %ymm1
10330 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10331 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm1[2,3],ymm0[0,1]
10332 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,0],ymm10[3,3],ymm1[4,4],ymm10[7,7]
10333 ; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
10334 ; AVX-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm10[1,2],xmm14[2]
10335 ; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
10336 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3,4],ymm10[5,6,7]
10337 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10338 ; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm13[2,3,2,3]
10339 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10340 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm14[1],xmm10[2,3]
10341 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10342 ; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
10343 ; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
10344 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10345 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm12[2,0],ymm0[6,5],ymm12[6,4]
10346 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
10347 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
10348 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10349 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm8[0],ymm0[2],ymm8[2]
10350 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
10351 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
10352 ; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
10353 ; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
10354 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
10355 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10356 ; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm5[2,3,2,3]
10357 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
10358 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm5[1],xmm10[2,3]
10359 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10360 ; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
10361 ; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
10362 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10363 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm12[2,0],ymm0[6,5],ymm12[6,4]
10364 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
10365 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10366 ; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
10367 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
10368 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
10369 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
10370 ; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
10371 ; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
10372 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
10373 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10374 ; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm9[2,3,2,3]
10375 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm11[1],xmm10[2,3]
10376 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10377 ; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
10378 ; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
10379 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10380 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm12[2,0],ymm0[6,5],ymm12[6,4]
10381 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
10382 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10383 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
10384 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm13[0],ymm0[0],ymm13[2],ymm0[2]
10385 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
10386 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
10387 ; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
10388 ; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
10389 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
10390 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10391 ; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm7[2,3,2,3]
10392 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10393 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm9[1],xmm10[2,3]
10394 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10395 ; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
10396 ; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
10397 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10398 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,1],ymm12[2,0],ymm2[6,5],ymm12[6,4]
10399 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
10400 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
10401 ; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm12 # 32-byte Folded Reload
10402 ; AVX-NEXT: # ymm12 = ymm11[0],mem[0],ymm11[2],mem[2]
10403 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
10404 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
10405 ; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
10406 ; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
10407 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
10408 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10409 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
10410 ; AVX-NEXT: # xmm10 = mem[2,3,2,3]
10411 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
10412 ; AVX-NEXT: # xmm10 = xmm10[0],mem[1],xmm10[2,3]
10413 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10414 ; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
10415 ; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
10416 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10417 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,1],ymm12[2,0],ymm2[6,5],ymm12[6,4]
10418 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
10419 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10420 ; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
10421 ; AVX-NEXT: # ymm12 = ymm2[0],mem[0],ymm2[2],mem[2]
10422 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
10423 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
10424 ; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
10425 ; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
10426 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
10427 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10428 ; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm15[2,3,2,3]
10429 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
10430 ; AVX-NEXT: # xmm10 = xmm10[0],mem[1],xmm10[2,3]
10431 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10432 ; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
10433 ; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
10434 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm6[2,1],ymm12[2,0],ymm6[6,5],ymm12[6,4]
10435 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
10436 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10437 ; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
10438 ; AVX-NEXT: # ymm12 = ymm2[0],mem[0],ymm2[2],mem[2]
10439 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
10440 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
10441 ; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
10442 ; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
10443 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
10444 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10445 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
10446 ; AVX-NEXT: # xmm10 = mem[2,3,2,3]
10447 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
10448 ; AVX-NEXT: # xmm10 = xmm10[0],mem[1],xmm10[2,3]
10449 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10450 ; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
10451 ; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
10452 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10453 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,1],ymm12[2,0],ymm2[6,5],ymm12[6,4]
10454 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
10455 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10456 ; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
10457 ; AVX-NEXT: # ymm12 = ymm2[0],mem[0],ymm2[2],mem[2]
10458 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
10459 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
10460 ; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
10461 ; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
10462 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
10463 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10464 ; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm3[2,3,2,3]
10465 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
10466 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
10467 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10468 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
10469 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[3,1],ymm6[0,3],ymm12[7,5],ymm6[4,7]
10470 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10471 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,1],ymm12[2,0],ymm2[6,5],ymm12[6,4]
10472 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
10473 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
10474 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
10475 ; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
10476 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
10477 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
10478 ; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
10479 ; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
10480 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
10481 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10482 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10483 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
10484 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,0],ymm12[0,0],ymm10[5,4],ymm12[4,4]
10485 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[3,1],ymm10[0,2],ymm12[7,5],ymm10[4,6]
10486 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm12 # 16-byte Folded Reload
10487 ; AVX-NEXT: # xmm12 = xmm14[0,1,2],mem[3]
10488 ; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[3,2,2,3]
10489 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1],ymm10[2,3,4,5,6,7]
10490 ; AVX-NEXT: vmovaps 416(%rdi), %ymm12
10491 ; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10492 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm12[0,1],ymm8[1,3],ymm12[4,5],ymm8[5,7]
10493 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
10494 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm8[0,2],ymm15[2,0],ymm8[4,6],ymm15[6,4]
10495 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm15[5,6,7]
10496 ; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10497 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10498 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
10499 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm8[1,0],ymm12[0,0],ymm8[5,4],ymm12[4,4]
10500 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[3,1],ymm10[0,2],ymm12[7,5],ymm10[4,6]
10501 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm15 # 16-byte Folded Reload
10502 ; AVX-NEXT: # xmm15 = xmm5[0,1,2],mem[3]
10503 ; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[3,2,2,3]
10504 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3,4,5,6,7]
10505 ; AVX-NEXT: vmovaps 864(%rdi), %ymm5
10506 ; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10507 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm5[0,1],ymm1[1,3],ymm5[4,5],ymm1[5,7]
10508 ; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
10509 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm1[0,2],ymm15[2,0],ymm1[4,6],ymm15[6,4]
10510 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3,4],ymm15[5,6,7]
10511 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10512 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10513 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
10514 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm5[1,0],ymm1[0,0],ymm5[5,4],ymm1[4,4]
10515 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[3,1],ymm10[0,2],ymm1[7,5],ymm10[4,6]
10516 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
10517 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm15 # 16-byte Folded Reload
10518 ; AVX-NEXT: # xmm15 = mem[0,1,2],xmm1[3]
10519 ; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[3,2,2,3]
10520 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3,4,5,6,7]
10521 ; AVX-NEXT: vmovaps 1312(%rdi), %ymm1
10522 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10523 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm1[0,1],ymm0[1,3],ymm1[4,5],ymm0[5,7]
10524 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm13[0,2],ymm15[2,0],ymm13[4,6],ymm15[6,4]
10525 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm15[5,6,7]
10526 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10527 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10528 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10529 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[1,0],ymm0[0,0],ymm1[5,4],ymm0[4,4]
10530 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[3,1],ymm10[0,2],ymm0[7,5],ymm10[4,6]
10531 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm15 # 16-byte Folded Reload
10532 ; AVX-NEXT: # xmm15 = xmm9[0,1,2],mem[3]
10533 ; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[3,2,2,3]
10534 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3,4,5,6,7]
10535 ; AVX-NEXT: vmovaps 1760(%rdi), %ymm0
10536 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10537 ; AVX-NEXT: vshufps $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
10538 ; AVX-NEXT: # ymm15 = ymm0[0,1],mem[1,3],ymm0[4,5],mem[5,7]
10539 ; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm11[0,2],ymm15[2,0],ymm11[4,6],ymm15[6,4]
10540 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm15[5,6,7]
10541 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10542 ; AVX-NEXT: vmovaps %ymm2, %ymm0
10543 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm6[1,0],ymm2[0,0],ymm6[5,4],ymm2[4,4]
10544 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,1],ymm2[0,2],ymm0[7,5],ymm2[4,6]
10545 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm9 # 16-byte Folded Reload
10546 ; AVX-NEXT: # xmm9 = xmm4[0,1,2],mem[3]
10547 ; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm9[3,2,2,3]
10548 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1],ymm2[2,3,4,5,6,7]
10549 ; AVX-NEXT: vmovaps 1536(%rdi), %ymm0
10550 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10551 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm0[0,1],ymm3[1,3],ymm0[4,5],ymm3[5,7]
10552 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm7[0,2],ymm9[2,0],ymm7[4,6],ymm9[6,4]
10553 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm9[5,6,7]
10554 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10555 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10556 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10557 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,0],ymm0[0,0],ymm1[5,4],ymm0[4,4]
10558 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,1],ymm2[0,2],ymm0[7,5],ymm2[4,6]
10559 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
10560 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm7 # 16-byte Folded Reload
10561 ; AVX-NEXT: # xmm7 = xmm15[0,1,2],mem[3]
10562 ; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[3,2,2,3]
10563 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3,4,5,6,7]
10564 ; AVX-NEXT: vmovaps 1088(%rdi), %ymm12
10565 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
10566 ; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm12[0,1],ymm8[1,3],ymm12[4,5],ymm8[5,7]
10567 ; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10568 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
10569 ; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm11[0,2],ymm7[2,0],ymm11[4,6],ymm7[6,4]
10570 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm7[5,6,7]
10571 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10572 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10573 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10574 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,0],ymm0[0,0],ymm1[5,4],ymm0[4,4]
10575 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,1],ymm2[0,2],ymm0[7,5],ymm2[4,6]
10576 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10577 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm7 # 16-byte Folded Reload
10578 ; AVX-NEXT: # xmm7 = xmm14[0,1,2],mem[3]
10579 ; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[3,2,2,3]
10580 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3,4,5,6,7]
10581 ; AVX-NEXT: vmovaps 640(%rdi), %ymm4
10582 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
10583 ; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm10[1,3],ymm4[4,5],ymm10[5,7]
10584 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10585 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
10586 ; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm3[0,2],ymm7[2,0],ymm3[4,6],ymm7[6,4]
10587 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm7[5,6,7]
10588 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10589 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10590 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10591 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,0],ymm0[0,0],ymm1[5,4],ymm0[4,4]
10592 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,1],ymm2[0,2],ymm0[7,5],ymm2[4,6]
10593 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
10594 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm7 # 16-byte Folded Reload
10595 ; AVX-NEXT: # xmm7 = xmm5[0,1,2],mem[3]
10596 ; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[3,2,2,3]
10597 ; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm2[2,3,4,5,6,7]
10598 ; AVX-NEXT: vmovaps 192(%rdi), %ymm13
10599 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10600 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm13[0,1],ymm1[1,3],ymm13[4,5],ymm1[5,7]
10601 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10602 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm2[0,2],ymm9[2,0],ymm2[4,6],ymm9[6,4]
10603 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3,4],ymm9[5,6,7]
10604 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10605 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm2[2,3,0,1]
10606 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,0],ymm7[0,0],ymm2[7,4],ymm7[4,4]
10607 ; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm13[1,0],ymm1[2,0],ymm13[5,4],ymm1[6,4]
10608 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm7[2,0],ymm0[6,4],ymm7[6,4]
10609 ; AVX-NEXT: vmovaps 64(%rdi), %xmm9
10610 ; AVX-NEXT: vmovaps 96(%rdi), %xmm1
10611 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10612 ; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm1[0,1,0,1]
10613 ; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm9[0,1,2],xmm7[3]
10614 ; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm5[2,3,2,3]
10615 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = mem[0],xmm6[1],mem[2,3]
10616 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
10617 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
10618 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10619 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10620 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
10621 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
10622 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
10623 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm5 # 32-byte Folded Reload
10624 ; AVX-NEXT: # ymm5 = ymm7[1,0],mem[2,0],ymm7[5,4],mem[6,4]
10625 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm5[2,0],ymm0[6,4],ymm5[6,4]
10626 ; AVX-NEXT: vmovaps 320(%rdi), %xmm1
10627 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10628 ; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm1[0,1,0,1]
10629 ; AVX-NEXT: vmovaps 288(%rdi), %xmm1
10630 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10631 ; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm1[0,1,2],xmm5[3]
10632 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
10633 ; AVX-NEXT: # xmm6 = mem[2,3,2,3]
10634 ; AVX-NEXT: vblendps {{.*#+}} xmm6 = mem[0],xmm6[1],mem[2,3]
10635 ; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
10636 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
10637 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10638 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
10639 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm3[3,0],ymm0[0,0],ymm3[7,4],ymm0[4,4]
10640 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm4[1,0],ymm10[2,0],ymm4[5,4],ymm10[6,4]
10641 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
10642 ; AVX-NEXT: vmovaps 544(%rdi), %xmm1
10643 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10644 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
10645 ; AVX-NEXT: vmovaps 512(%rdi), %xmm6
10646 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1,2],xmm1[3]
10647 ; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm14[2,3,2,3]
10648 ; AVX-NEXT: vblendps {{.*#+}} xmm5 = mem[0],xmm5[1],mem[2,3]
10649 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
10650 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
10651 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10652 ; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
10653 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
10654 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
10655 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
10656 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm1 # 32-byte Folded Reload
10657 ; AVX-NEXT: # ymm1 = ymm5[1,0],mem[2,0],ymm5[5,4],mem[6,4]
10658 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
10659 ; AVX-NEXT: vmovaps 768(%rdi), %xmm1
10660 ; AVX-NEXT: vmovaps %xmm1, (%rsp) # 16-byte Spill
10661 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
10662 ; AVX-NEXT: vmovaps 736(%rdi), %xmm2
10663 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10664 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
10665 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
10666 ; AVX-NEXT: # xmm14 = mem[2,3,2,3]
10667 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = mem[0],xmm14[1],mem[2,3]
10668 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3]
10669 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
10670 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10671 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm11[2,3,0,1]
10672 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm11[3,0],ymm0[0,0],ymm11[7,4],ymm0[4,4]
10673 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,0],ymm8[2,0],ymm12[5,4],ymm8[6,4]
10674 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
10675 ; AVX-NEXT: vmovaps 992(%rdi), %xmm1
10676 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10677 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
10678 ; AVX-NEXT: vmovaps 960(%rdi), %xmm3
10679 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
10680 ; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm15[2,3,2,3]
10681 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = mem[0],xmm14[1],mem[2,3]
10682 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3]
10683 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
10684 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10685 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10686 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
10687 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
10688 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
10689 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm1 # 32-byte Folded Reload
10690 ; AVX-NEXT: # ymm1 = ymm8[1,0],mem[2,0],ymm8[5,4],mem[6,4]
10691 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
10692 ; AVX-NEXT: vmovaps 1216(%rdi), %xmm1
10693 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10694 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
10695 ; AVX-NEXT: vmovaps 1184(%rdi), %xmm2
10696 ; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10697 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
10698 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
10699 ; AVX-NEXT: # xmm14 = mem[2,3,2,3]
10700 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = mem[0],xmm14[1],mem[2,3]
10701 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3]
10702 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
10703 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10704 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10705 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
10706 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
10707 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10708 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
10709 ; AVX-NEXT: # ymm1 = ymm1[1,0],mem[2,0],ymm1[5,4],mem[6,4]
10710 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
10711 ; AVX-NEXT: vmovaps 1440(%rdi), %xmm1
10712 ; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10713 ; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm1[0,1,0,1]
10714 ; AVX-NEXT: vmovaps 1408(%rdi), %xmm1
10715 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm1[0,1,2],xmm14[3]
10716 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
10717 ; AVX-NEXT: # xmm4 = mem[2,3,2,3]
10718 ; AVX-NEXT: vblendps {{.*#+}} xmm4 = mem[0],xmm4[1],mem[2,3]
10719 ; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm14[2,3]
10720 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
10721 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10722 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10723 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1]
10724 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,0],ymm0[0,0],ymm2[7,4],ymm0[4,4]
10725 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10726 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
10727 ; AVX-NEXT: # ymm4 = ymm2[1,0],mem[2,0],ymm2[5,4],mem[6,4]
10728 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm0[2,0],ymm4[2,0],ymm0[6,4],ymm4[6,4]
10729 ; AVX-NEXT: vmovaps 1664(%rdi), %xmm0
10730 ; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10731 ; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm0[0,1,0,1]
10732 ; AVX-NEXT: vmovaps 1632(%rdi), %xmm0
10733 ; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm0[0,1,2],xmm14[3]
10734 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
10735 ; AVX-NEXT: # xmm11 = mem[2,3,2,3]
10736 ; AVX-NEXT: vblendps {{.*#+}} xmm11 = mem[0],xmm11[1],mem[2,3]
10737 ; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm14[2,3]
10738 ; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1,2,3],ymm4[4,5,6,7]
10739 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10740 ; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm4 # 32-byte Folded Reload
10741 ; AVX-NEXT: # ymm4 = ymm13[2,1],mem[3,3],ymm13[6,5],mem[7,7]
10742 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
10743 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
10744 ; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm12[0],xmm10[1],xmm12[2,3]
10745 ; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
10746 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[1,0],ymm4[2,0],ymm11[5,4],ymm4[6,4]
10747 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm11 # 16-byte Folded Reload
10748 ; AVX-NEXT: # xmm11 = mem[0,1,2],xmm9[3]
10749 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
10750 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
10751 ; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm9[0,0],ymm15[1,0],ymm9[4,4],ymm15[5,4]
10752 ; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
10753 ; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
10754 ; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm14[0,1],xmm11[3,2]
10755 ; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1,2,3],ymm4[4,5,6,7]
10756 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10757 ; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm4 # 32-byte Folded Reload
10758 ; AVX-NEXT: # ymm4 = ymm7[2,1],mem[3,3],ymm7[6,5],mem[7,7]
10759 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10760 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10761 ; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm14[0],xmm9[1],xmm14[2,3]
10762 ; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
10763 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[1,0],ymm4[2,0],ymm11[5,4],ymm4[6,4]
10764 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
10765 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
10766 ; AVX-NEXT: # xmm7 = mem[0,1,2],xmm7[3]
10767 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
10768 ; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
10769 ; AVX-NEXT: # ymm11 = ymm11[0,0],mem[1,0],ymm11[4,4],mem[5,4]
10770 ; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,0,2,3,6,4,6,7]
10771 ; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
10772 ; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm11[0,1],xmm7[3,2]
10773 ; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3],ymm4[4,5,6,7]
10774 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10775 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
10776 ; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
10777 ; AVX-NEXT: # ymm4 = ymm4[2,1],mem[3,3],ymm4[6,5],mem[7,7]
10778 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
10779 ; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm7 # 16-byte Folded Reload
10780 ; AVX-NEXT: # xmm7 = mem[0],xmm11[1],mem[2,3]
10781 ; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
10782 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm7[1,0],ymm4[2,0],ymm7[5,4],ymm4[6,4]
10783 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
10784 ; AVX-NEXT: # xmm6 = mem[0,1,2],xmm6[3]
10785 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
10786 ; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
10787 ; AVX-NEXT: # ymm7 = ymm7[0,0],mem[1,0],ymm7[4,4],mem[5,4]
10788 ; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,0,2,3,6,4,6,7]
10789 ; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
10790 ; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm7[0,1],xmm6[3,2]
10791 ; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
10792 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10793 ; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm4 # 32-byte Folded Reload
10794 ; AVX-NEXT: # ymm4 = ymm5[2,1],mem[3,3],ymm5[6,5],mem[7,7]
10795 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
10796 ; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm6 # 16-byte Folded Reload
10797 ; AVX-NEXT: # xmm6 = mem[0],xmm7[1],mem[2,3]
10798 ; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10799 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm6[1,0],ymm4[2,0],ymm6[5,4],ymm4[6,4]
10800 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
10801 ; AVX-NEXT: vblendps $7, (%rsp), %xmm5, %xmm5 # 16-byte Folded Reload
10802 ; AVX-NEXT: # xmm5 = mem[0,1,2],xmm5[3]
10803 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
10804 ; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
10805 ; AVX-NEXT: # ymm6 = ymm6[0,0],mem[1,0],ymm6[4,4],mem[5,4]
10806 ; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7]
10807 ; AVX-NEXT: vextractf128 $1, %ymm6, %xmm6
10808 ; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm6[0,1],xmm5[3,2]
10809 ; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
10810 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10811 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
10812 ; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
10813 ; AVX-NEXT: # ymm4 = ymm4[2,1],mem[3,3],ymm4[6,5],mem[7,7]
10814 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
10815 ; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
10816 ; AVX-NEXT: # xmm5 = mem[0],xmm5[1],mem[2,3]
10817 ; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
10818 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[1,0],ymm4[2,0],ymm5[5,4],ymm4[6,4]
10819 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
10820 ; AVX-NEXT: # xmm3 = mem[0,1,2],xmm3[3]
10821 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
10822 ; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
10823 ; AVX-NEXT: # ymm5 = ymm5[0,0],mem[1,0],ymm5[4,4],mem[5,4]
10824 ; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
10825 ; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
10826 ; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm5[0,1],xmm3[3,2]
10827 ; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
10828 ; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10829 ; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm3 # 32-byte Folded Reload
10830 ; AVX-NEXT: # ymm3 = ymm8[2,1],mem[3,3],ymm8[6,5],mem[7,7]
10831 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
10832 ; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm4 # 16-byte Folded Reload
10833 ; AVX-NEXT: # xmm4 = mem[0],xmm6[1],mem[2,3]
10834 ; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
10835 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[1,0],ymm3[2,0],ymm4[5,4],ymm3[6,4]
10836 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
10837 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
10838 ; AVX-NEXT: # xmm4 = mem[0,1,2],xmm4[3]
10839 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
10840 ; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
10841 ; AVX-NEXT: # ymm5 = ymm5[0,0],mem[1,0],ymm5[4,4],mem[5,4]
10842 ; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
10843 ; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
10844 ; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm5[0,1],xmm4[3,2]
10845 ; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
10846 ; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10847 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
10848 ; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm3 # 32-byte Folded Reload
10849 ; AVX-NEXT: # ymm3 = ymm8[2,1],mem[3,3],ymm8[6,5],mem[7,7]
10850 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
10851 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm4 # 16-byte Folded Reload
10852 ; AVX-NEXT: # xmm4 = xmm5[0],mem[1],xmm5[2,3]
10853 ; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
10854 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[1,0],ymm3[2,0],ymm4[5,4],ymm3[6,4]
10855 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
10856 ; AVX-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
10857 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
10858 ; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
10859 ; AVX-NEXT: # ymm4 = ymm4[0,0],mem[1,0],ymm4[4,4],mem[5,4]
10860 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
10861 ; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
10862 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm4[0,1],xmm1[3,2]
10863 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
10864 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10865 ; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload
10866 ; AVX-NEXT: # ymm1 = ymm2[2,1],mem[3,3],ymm2[6,5],mem[7,7]
10867 ; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
10868 ; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
10869 ; AVX-NEXT: # xmm3 = mem[0],xmm3[1],mem[2,3]
10870 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
10871 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[1,0],ymm1[2,0],ymm3[5,4],ymm1[6,4]
10872 ; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
10873 ; AVX-NEXT: # xmm0 = mem[0,1,2],xmm0[3]
10874 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
10875 ; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
10876 ; AVX-NEXT: # ymm3 = ymm3[0,0],mem[1,0],ymm3[4,4],mem[5,4]
10877 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
10878 ; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
10879 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm3[0,1],xmm0[3,2]
10880 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
10881 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10882 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3,0,1]
10883 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm13[3,0],ymm0[0,0],ymm13[7,4],ymm0[4,4]
10884 ; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm10[2,3,2,3]
10885 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm12[1],xmm1[2,3]
10886 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10887 ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
10888 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
10889 ; AVX-NEXT: # xmm1 = mem[0,1,0,1]
10890 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
10891 ; AVX-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
10892 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10893 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,0],ymm15[2,0],ymm2[5,4],ymm15[6,4]
10894 ; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
10895 ; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
10896 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
10897 ; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm0[4,5,6,7]
10898 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10899 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
10900 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm0[3,0],ymm1[0,0],ymm0[7,4],ymm1[4,4]
10901 ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm9[2,3,2,3]
10902 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm14[1],xmm2[2,3]
10903 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
10904 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
10905 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
10906 ; AVX-NEXT: # xmm2 = mem[0,1,0,1]
10907 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
10908 ; AVX-NEXT: # xmm2 = xmm2[0,1,2],mem[3]
10909 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10910 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
10911 ; AVX-NEXT: # ymm3 = ymm0[1,0],mem[2,0],ymm0[5,4],mem[6,4]
10912 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
10913 ; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
10914 ; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
10915 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm1[4,5,6,7]
10916 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10917 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
10918 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm0[3,0],ymm1[0,0],ymm0[7,4],ymm1[4,4]
10919 ; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm11[2,3,2,3]
10920 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
10921 ; AVX-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
10922 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
10923 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,0],ymm3[4,5],ymm1[6,4]
10924 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
10925 ; AVX-NEXT: # xmm3 = mem[0,1,0,1]
10926 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
10927 ; AVX-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
10928 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10929 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
10930 ; AVX-NEXT: # ymm4 = ymm0[1,0],mem[2,0],ymm0[5,4],mem[6,4]
10931 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
10932 ; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
10933 ; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
10934 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm1[4,5,6,7]
10935 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
10936 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3,0,1]
10937 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[3,0],ymm1[0,0],ymm3[7,4],ymm1[4,4]
10938 ; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm7[2,3,2,3]
10939 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
10940 ; AVX-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
10941 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
10942 ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,0],ymm3[4,5],ymm1[6,4]
10943 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
10944 ; AVX-NEXT: # xmm3 = mem[0,1,0,1]
10945 ; AVX-NEXT: vblendps $8, (%rsp), %xmm3, %xmm3 # 16-byte Folded Reload
10946 ; AVX-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
10947 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
10948 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
10949 ; AVX-NEXT: # ymm4 = ymm4[1,0],mem[2,0],ymm4[5,4],mem[6,4]
10950 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
10951 ; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
10952 ; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
10953 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
10954 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
10955 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm4[2,3,0,1]
10956 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,0],ymm3[0,0],ymm4[7,4],ymm3[4,4]
10957 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
10958 ; AVX-NEXT: # xmm4 = mem[2,3,2,3]
10959 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
10960 ; AVX-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
10961 ; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
10962 ; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
10963 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
10964 ; AVX-NEXT: # xmm4 = mem[0,1,0,1]
10965 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
10966 ; AVX-NEXT: # xmm4 = xmm4[0,1,2],mem[3]
10967 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
10968 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm9 # 32-byte Folded Reload
10969 ; AVX-NEXT: # ymm9 = ymm7[1,0],mem[2,0],ymm7[5,4],mem[6,4]
10970 ; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,0,2,3,6,4,6,7]
10971 ; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
10972 ; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm9[0,1],xmm4[2,3]
10973 ; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
10974 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
10975 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm7[2,3,0,1]
10976 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm7[3,0],ymm4[0,0],ymm7[7,4],ymm4[4,4]
10977 ; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm6[2,3,2,3]
10978 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
10979 ; AVX-NEXT: # xmm9 = xmm9[0],mem[1],xmm9[2,3]
10980 ; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
10981 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm9[0,1],ymm4[2,0],ymm9[4,5],ymm4[6,4]
10982 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
10983 ; AVX-NEXT: # xmm9 = mem[0,1,0,1]
10984 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
10985 ; AVX-NEXT: # xmm9 = xmm9[0,1,2],mem[3]
10986 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
10987 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm10 # 32-byte Folded Reload
10988 ; AVX-NEXT: # ymm10 = ymm6[1,0],mem[2,0],ymm6[5,4],mem[6,4]
10989 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
10990 ; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
10991 ; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
10992 ; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm4[4,5,6,7]
10993 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm8[2,3,0,1]
10994 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[3,0],ymm4[0,0],ymm8[7,4],ymm4[4,4]
10995 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
10996 ; AVX-NEXT: # xmm10 = mem[2,3,2,3]
10997 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm5[1],xmm10[2,3]
10998 ; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
10999 ; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm10[0,1],ymm4[2,0],ymm10[4,5],ymm4[6,4]
11000 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
11001 ; AVX-NEXT: # xmm10 = mem[0,1,0,1]
11002 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
11003 ; AVX-NEXT: # xmm10 = xmm10[0,1,2],mem[3]
11004 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
11005 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm11 # 32-byte Folded Reload
11006 ; AVX-NEXT: # ymm11 = ymm6[1,0],mem[2,0],ymm6[5,4],mem[6,4]
11007 ; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,0,2,3,6,4,6,7]
11008 ; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
11009 ; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3]
11010 ; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
11011 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11012 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm5[2,3,0,1]
11013 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm5[3,0],ymm10[0,0],ymm5[7,4],ymm10[4,4]
11014 ; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
11015 ; AVX-NEXT: # xmm11 = mem[2,3,2,3]
11016 ; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
11017 ; AVX-NEXT: # xmm11 = xmm11[0],mem[1],xmm11[2,3]
11018 ; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
11019 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,0],ymm11[4,5],ymm10[6,4]
11020 ; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
11021 ; AVX-NEXT: # xmm11 = mem[0,1,0,1]
11022 ; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
11023 ; AVX-NEXT: # xmm11 = xmm11[0,1,2],mem[3]
11024 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11025 ; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm12 # 32-byte Folded Reload
11026 ; AVX-NEXT: # ymm12 = ymm5[1,0],mem[2,0],ymm5[5,4],mem[6,4]
11027 ; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,0,2,3,6,4,6,7]
11028 ; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
11029 ; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm12[0,1],xmm11[2,3]
11030 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
11031 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11032 ; AVX-NEXT: vmovaps %ymm5, 192(%rsi)
11033 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11034 ; AVX-NEXT: vmovaps %ymm5, 128(%rsi)
11035 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11036 ; AVX-NEXT: vmovaps %ymm5, 64(%rsi)
11037 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11038 ; AVX-NEXT: vmovaps %ymm5, (%rsi)
11039 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
11040 ; AVX-NEXT: vmovaps %ymm11, 224(%rsi)
11041 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
11042 ; AVX-NEXT: vmovaps %ymm11, 160(%rsi)
11043 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
11044 ; AVX-NEXT: vmovaps %ymm11, 96(%rsi)
11045 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
11046 ; AVX-NEXT: vmovaps %ymm11, 32(%rsi)
11047 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11048 ; AVX-NEXT: vmovaps %ymm5, 192(%rdx)
11049 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11050 ; AVX-NEXT: vmovaps %ymm5, 128(%rdx)
11051 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11052 ; AVX-NEXT: vmovaps %ymm5, 64(%rdx)
11053 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11054 ; AVX-NEXT: vmovaps %ymm5, (%rdx)
11055 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11056 ; AVX-NEXT: vmovaps %ymm5, 224(%rdx)
11057 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11058 ; AVX-NEXT: vmovaps %ymm5, 160(%rdx)
11059 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11060 ; AVX-NEXT: vmovaps %ymm5, 96(%rdx)
11061 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11062 ; AVX-NEXT: vmovaps %ymm5, 32(%rdx)
11063 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11064 ; AVX-NEXT: vmovaps %ymm5, 192(%rcx)
11065 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11066 ; AVX-NEXT: vmovaps %ymm5, 128(%rcx)
11067 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11068 ; AVX-NEXT: vmovaps %ymm5, 64(%rcx)
11069 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11070 ; AVX-NEXT: vmovaps %ymm5, (%rcx)
11071 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11072 ; AVX-NEXT: vmovaps %ymm5, 224(%rcx)
11073 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11074 ; AVX-NEXT: vmovaps %ymm5, 160(%rcx)
11075 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11076 ; AVX-NEXT: vmovaps %ymm5, 96(%rcx)
11077 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11078 ; AVX-NEXT: vmovaps %ymm5, 32(%rcx)
11079 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11080 ; AVX-NEXT: vmovaps %ymm5, (%r8)
11081 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11082 ; AVX-NEXT: vmovaps %ymm5, 64(%r8)
11083 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11084 ; AVX-NEXT: vmovaps %ymm5, 128(%r8)
11085 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11086 ; AVX-NEXT: vmovaps %ymm5, 192(%r8)
11087 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11088 ; AVX-NEXT: vmovaps %ymm5, 224(%r8)
11089 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11090 ; AVX-NEXT: vmovaps %ymm5, 160(%r8)
11091 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11092 ; AVX-NEXT: vmovaps %ymm5, 96(%r8)
11093 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11094 ; AVX-NEXT: vmovaps %ymm5, 32(%r8)
11095 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11096 ; AVX-NEXT: vmovaps %ymm5, 224(%r9)
11097 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11098 ; AVX-NEXT: vmovaps %ymm5, 192(%r9)
11099 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11100 ; AVX-NEXT: vmovaps %ymm5, 160(%r9)
11101 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11102 ; AVX-NEXT: vmovaps %ymm5, 128(%r9)
11103 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11104 ; AVX-NEXT: vmovaps %ymm5, 96(%r9)
11105 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11106 ; AVX-NEXT: vmovaps %ymm5, 64(%r9)
11107 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11108 ; AVX-NEXT: vmovaps %ymm5, 32(%r9)
11109 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11110 ; AVX-NEXT: vmovaps %ymm5, (%r9)
11111 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
11112 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11113 ; AVX-NEXT: vmovaps %ymm5, 224(%rax)
11114 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11115 ; AVX-NEXT: vmovaps %ymm5, 192(%rax)
11116 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11117 ; AVX-NEXT: vmovaps %ymm5, 160(%rax)
11118 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11119 ; AVX-NEXT: vmovaps %ymm5, 128(%rax)
11120 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11121 ; AVX-NEXT: vmovaps %ymm5, 96(%rax)
11122 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11123 ; AVX-NEXT: vmovaps %ymm5, 64(%rax)
11124 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11125 ; AVX-NEXT: vmovaps %ymm5, 32(%rax)
11126 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11127 ; AVX-NEXT: vmovaps %ymm5, (%rax)
11128 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
11129 ; AVX-NEXT: vmovaps %ymm10, 224(%rax)
11130 ; AVX-NEXT: vmovaps %ymm4, 192(%rax)
11131 ; AVX-NEXT: vmovaps %ymm9, 160(%rax)
11132 ; AVX-NEXT: vmovaps %ymm3, 128(%rax)
11133 ; AVX-NEXT: vmovaps %ymm1, 96(%rax)
11134 ; AVX-NEXT: vmovaps %ymm0, 64(%rax)
11135 ; AVX-NEXT: vmovaps %ymm2, 32(%rax)
11136 ; AVX-NEXT: vmovaps %ymm13, (%rax)
11137 ; AVX-NEXT: addq $3176, %rsp # imm = 0xC68
11138 ; AVX-NEXT: vzeroupper
11139 ; AVX-NEXT: retq
11140 ;
11141 ; AVX2-LABEL: load_i32_stride7_vf64:
11142 ; AVX2: # %bb.0:
11143 ; AVX2-NEXT: subq $2648, %rsp # imm = 0xA58
11144 ; AVX2-NEXT: vmovdqa 1216(%rdi), %ymm9
11145 ; AVX2-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11146 ; AVX2-NEXT: vmovdqa 1152(%rdi), %ymm4
11147 ; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11148 ; AVX2-NEXT: vmovdqa 1120(%rdi), %ymm5
11149 ; AVX2-NEXT: vmovdqa 768(%rdi), %ymm12
11150 ; AVX2-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11151 ; AVX2-NEXT: vmovdqa 704(%rdi), %ymm6
11152 ; AVX2-NEXT: vmovdqa 672(%rdi), %ymm7
11153 ; AVX2-NEXT: vmovdqa 320(%rdi), %ymm8
11154 ; AVX2-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11155 ; AVX2-NEXT: vmovdqa 256(%rdi), %ymm10
11156 ; AVX2-NEXT: vmovdqa 224(%rdi), %ymm11
11157 ; AVX2-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,7,6,0]
11158 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5],ymm10[6],ymm11[7]
11159 ; AVX2-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11160 ; AVX2-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11161 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm1
11162 ; AVX2-NEXT: vpbroadcastq 304(%rdi), %ymm2
11163 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7]
11164 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
11165 ; AVX2-NEXT: vmovdqa 352(%rdi), %xmm2
11166 ; AVX2-NEXT: vmovdqa 384(%rdi), %xmm3
11167 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11168 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
11169 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
11170 ; AVX2-NEXT: vpbroadcastd 420(%rdi), %ymm3
11171 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
11172 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
11173 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11174 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5],ymm6[6],ymm7[7]
11175 ; AVX2-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11176 ; AVX2-NEXT: vmovdqa %ymm6, %ymm8
11177 ; AVX2-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11178 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm1
11179 ; AVX2-NEXT: vpbroadcastq 752(%rdi), %ymm2
11180 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm12[4,5,6,7]
11181 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
11182 ; AVX2-NEXT: vmovdqa 800(%rdi), %xmm2
11183 ; AVX2-NEXT: vmovdqa 832(%rdi), %xmm3
11184 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11185 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
11186 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
11187 ; AVX2-NEXT: vpbroadcastd 868(%rdi), %ymm3
11188 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
11189 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
11190 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11191 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm4[6],ymm5[7]
11192 ; AVX2-NEXT: vmovdqa %ymm5, %ymm6
11193 ; AVX2-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11194 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm1
11195 ; AVX2-NEXT: vpbroadcastq 1200(%rdi), %ymm2
11196 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
11197 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
11198 ; AVX2-NEXT: vmovdqa 1248(%rdi), %xmm2
11199 ; AVX2-NEXT: vmovdqa 1280(%rdi), %xmm3
11200 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11201 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
11202 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
11203 ; AVX2-NEXT: vpbroadcastd 1316(%rdi), %ymm3
11204 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
11205 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
11206 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11207 ; AVX2-NEXT: vmovdqa 1600(%rdi), %ymm13
11208 ; AVX2-NEXT: vmovdqa 1568(%rdi), %ymm5
11209 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm13[6],ymm5[7]
11210 ; AVX2-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11211 ; AVX2-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11212 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm1
11213 ; AVX2-NEXT: vmovdqa 1664(%rdi), %ymm3
11214 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11215 ; AVX2-NEXT: vpbroadcastq 1648(%rdi), %ymm2
11216 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
11217 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
11218 ; AVX2-NEXT: vmovdqa 1696(%rdi), %xmm2
11219 ; AVX2-NEXT: vmovdqa 1728(%rdi), %xmm3
11220 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11221 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
11222 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
11223 ; AVX2-NEXT: vpbroadcastd 1764(%rdi), %ymm3
11224 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
11225 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
11226 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11227 ; AVX2-NEXT: vmovdqa 96(%rdi), %ymm2
11228 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11229 ; AVX2-NEXT: vpbroadcastq 80(%rdi), %ymm1
11230 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
11231 ; AVX2-NEXT: vmovdqa (%rdi), %ymm2
11232 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11233 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm3
11234 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11235 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6],ymm2[7]
11236 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
11237 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
11238 ; AVX2-NEXT: vmovdqa 128(%rdi), %xmm2
11239 ; AVX2-NEXT: vmovdqa 160(%rdi), %xmm3
11240 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11241 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
11242 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
11243 ; AVX2-NEXT: vpbroadcastd 196(%rdi), %ymm3
11244 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
11245 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
11246 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11247 ; AVX2-NEXT: vmovdqa 480(%rdi), %ymm2
11248 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11249 ; AVX2-NEXT: vmovdqa 448(%rdi), %ymm1
11250 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11251 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
11252 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm1
11253 ; AVX2-NEXT: vmovdqa 544(%rdi), %ymm3
11254 ; AVX2-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill
11255 ; AVX2-NEXT: vpbroadcastq 528(%rdi), %ymm2
11256 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
11257 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
11258 ; AVX2-NEXT: vmovdqa 576(%rdi), %xmm2
11259 ; AVX2-NEXT: vmovdqa 608(%rdi), %xmm3
11260 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11261 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
11262 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
11263 ; AVX2-NEXT: vpbroadcastd 644(%rdi), %ymm3
11264 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
11265 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
11266 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11267 ; AVX2-NEXT: vmovdqa 928(%rdi), %ymm2
11268 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11269 ; AVX2-NEXT: vmovdqa 896(%rdi), %ymm1
11270 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11271 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
11272 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm1
11273 ; AVX2-NEXT: vmovdqa 992(%rdi), %ymm3
11274 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11275 ; AVX2-NEXT: vpbroadcastq 976(%rdi), %ymm2
11276 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
11277 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
11278 ; AVX2-NEXT: vmovdqa 1024(%rdi), %xmm2
11279 ; AVX2-NEXT: vmovdqa 1056(%rdi), %xmm3
11280 ; AVX2-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11281 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
11282 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
11283 ; AVX2-NEXT: vpbroadcastd 1092(%rdi), %ymm3
11284 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
11285 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
11286 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11287 ; AVX2-NEXT: vmovdqa 1376(%rdi), %ymm14
11288 ; AVX2-NEXT: vmovdqa 1344(%rdi), %ymm15
11289 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3,4,5],ymm14[6],ymm15[7]
11290 ; AVX2-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11291 ; AVX2-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11292 ; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm0
11293 ; AVX2-NEXT: vmovdqa 1440(%rdi), %ymm4
11294 ; AVX2-NEXT: vpbroadcastq 1424(%rdi), %ymm1
11295 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
11296 ; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11297 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
11298 ; AVX2-NEXT: vmovdqa 1472(%rdi), %xmm1
11299 ; AVX2-NEXT: vmovdqa 1504(%rdi), %xmm2
11300 ; AVX2-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11301 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
11302 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11303 ; AVX2-NEXT: vpbroadcastd 1540(%rdi), %ymm2
11304 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
11305 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11306 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11307 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,2,2,2]
11308 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
11309 ; AVX2-NEXT: vmovdqa 384(%rdi), %ymm1
11310 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11311 ; AVX2-NEXT: vmovdqa 352(%rdi), %ymm0
11312 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11313 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11],ymm1[28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27]
11314 ; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,0]
11315 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
11316 ; AVX2-NEXT: vmovdqa 288(%rdi), %ymm12
11317 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
11318 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm12[2,3],ymm9[4,5],ymm12[6,7]
11319 ; AVX2-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11320 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0],ymm11[1],ymm10[2,3,4],ymm11[5],ymm10[6,7]
11321 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11322 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
11323 ; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,0,7,6,5,6,5,6]
11324 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
11325 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
11326 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11327 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
11328 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11329 ; AVX2-NEXT: vmovdqa 832(%rdi), %ymm3
11330 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11331 ; AVX2-NEXT: vmovdqa 800(%rdi), %ymm2
11332 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11333 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
11334 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
11335 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
11336 ; AVX2-NEXT: vmovdqa 736(%rdi), %ymm2
11337 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11338 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
11339 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm2[2,3],ymm11[4,5],ymm2[6,7]
11340 ; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0],ymm7[1],ymm8[2,3,4],ymm7[5],ymm8[6,7]
11341 ; AVX2-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11342 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3,4],ymm2[5,6],ymm8[7]
11343 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
11344 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
11345 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11346 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
11347 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11348 ; AVX2-NEXT: vmovdqa 1280(%rdi), %ymm3
11349 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11350 ; AVX2-NEXT: vmovdqa 1248(%rdi), %ymm2
11351 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11352 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
11353 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
11354 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
11355 ; AVX2-NEXT: vmovdqa 1184(%rdi), %ymm2
11356 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11357 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
11358 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0,1],ymm2[2,3],ymm10[4,5],ymm2[6,7]
11359 ; AVX2-NEXT: vpblendd $221, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
11360 ; AVX2-NEXT: # ymm7 = mem[0],ymm6[1],mem[2,3,4],ymm6[5],mem[6,7]
11361 ; AVX2-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11362 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4],ymm2[5,6],ymm7[7]
11363 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
11364 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
11365 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11366 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
11367 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11368 ; AVX2-NEXT: vmovdqa 1728(%rdi), %ymm3
11369 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11370 ; AVX2-NEXT: vmovdqa 1696(%rdi), %ymm2
11371 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11372 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
11373 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
11374 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
11375 ; AVX2-NEXT: vmovdqa 1632(%rdi), %ymm2
11376 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11377 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
11378 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
11379 ; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm13[0],ymm5[1],ymm13[2,3,4],ymm5[5],ymm13[6,7]
11380 ; AVX2-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11381 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5,6],ymm5[7]
11382 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
11383 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
11384 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11385 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
11386 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11387 ; AVX2-NEXT: vmovdqa 608(%rdi), %ymm3
11388 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11389 ; AVX2-NEXT: vmovdqa 576(%rdi), %ymm2
11390 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11391 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
11392 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
11393 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
11394 ; AVX2-NEXT: vmovdqa 512(%rdi), %ymm2
11395 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11396 ; AVX2-NEXT: vmovdqu (%rsp), %ymm8 # 32-byte Reload
11397 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3],ymm8[4,5],ymm2[6,7]
11398 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11399 ; AVX2-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
11400 ; AVX2-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
11401 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11402 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
11403 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
11404 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
11405 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11406 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
11407 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11408 ; AVX2-NEXT: vmovdqa 1056(%rdi), %ymm3
11409 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11410 ; AVX2-NEXT: vmovdqa 1024(%rdi), %ymm2
11411 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11412 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
11413 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
11414 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
11415 ; AVX2-NEXT: vmovdqa 960(%rdi), %ymm2
11416 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11417 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
11418 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm2[2,3],ymm13[4,5],ymm2[6,7]
11419 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11420 ; AVX2-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
11421 ; AVX2-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
11422 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11423 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
11424 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
11425 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
11426 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11427 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
11428 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11429 ; AVX2-NEXT: vmovdqa 1504(%rdi), %ymm3
11430 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11431 ; AVX2-NEXT: vmovdqa 1472(%rdi), %ymm2
11432 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11433 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
11434 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
11435 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
11436 ; AVX2-NEXT: vmovdqa 1408(%rdi), %ymm2
11437 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11438 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
11439 ; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm14[0],ymm15[1],ymm14[2,3,4],ymm15[5],ymm14[6,7]
11440 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11441 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
11442 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm2
11443 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
11444 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11445 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
11446 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11447 ; AVX2-NEXT: vmovdqa 160(%rdi), %ymm15
11448 ; AVX2-NEXT: vmovdqa 128(%rdi), %ymm14
11449 ; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm15[12,13,14,15],ymm14[0,1,2,3,4,5,6,7,8,9,10,11],ymm15[28,29,30,31],ymm14[16,17,18,19,20,21,22,23,24,25,26,27]
11450 ; AVX2-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11451 ; AVX2-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11452 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
11453 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
11454 ; AVX2-NEXT: vmovdqa 64(%rdi), %ymm4
11455 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
11456 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm4[2,3],ymm7[4,5],ymm4[6,7]
11457 ; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11458 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11459 ; AVX2-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
11460 ; AVX2-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
11461 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11462 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
11463 ; AVX2-NEXT: vpermd %ymm2, %ymm0, %ymm0
11464 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11465 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11466 ; AVX2-NEXT: vmovdqa 304(%rdi), %xmm0
11467 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm9[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
11468 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
11469 ; AVX2-NEXT: vpbroadcastd 232(%rdi), %xmm1
11470 ; AVX2-NEXT: vmovdqa 256(%rdi), %xmm5
11471 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3]
11472 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
11473 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
11474 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11475 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm12[0],ymm1[2],ymm12[2]
11476 ; AVX2-NEXT: vpbroadcastd 428(%rdi), %ymm2
11477 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
11478 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11479 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11480 ; AVX2-NEXT: vmovdqa 752(%rdi), %xmm0
11481 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11482 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm11[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
11483 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
11484 ; AVX2-NEXT: vpbroadcastd 680(%rdi), %xmm1
11485 ; AVX2-NEXT: vmovdqa 704(%rdi), %xmm2
11486 ; AVX2-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11487 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
11488 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
11489 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11490 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11491 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
11492 ; AVX2-NEXT: vpbroadcastd 876(%rdi), %ymm2
11493 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
11494 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11495 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11496 ; AVX2-NEXT: vmovdqa 1200(%rdi), %xmm0
11497 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11498 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm10[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
11499 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
11500 ; AVX2-NEXT: vpbroadcastd 1128(%rdi), %xmm1
11501 ; AVX2-NEXT: vmovdqa 1152(%rdi), %xmm2
11502 ; AVX2-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11503 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
11504 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
11505 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11506 ; AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11507 ; AVX2-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
11508 ; AVX2-NEXT: vpbroadcastd 1324(%rdi), %ymm2
11509 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
11510 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11511 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11512 ; AVX2-NEXT: vmovdqa 1648(%rdi), %xmm0
11513 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11514 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm6[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
11515 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
11516 ; AVX2-NEXT: vpbroadcastd 1576(%rdi), %xmm1
11517 ; AVX2-NEXT: vmovdqa 1600(%rdi), %xmm2
11518 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
11519 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
11520 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11521 ; AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11522 ; AVX2-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
11523 ; AVX2-NEXT: vpbroadcastd 1772(%rdi), %ymm6
11524 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm6[7]
11525 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11526 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11527 ; AVX2-NEXT: vmovdqa 80(%rdi), %xmm0
11528 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm7[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],ymm7[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
11529 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
11530 ; AVX2-NEXT: vpbroadcastd 8(%rdi), %xmm1
11531 ; AVX2-NEXT: vmovdqa 32(%rdi), %xmm4
11532 ; AVX2-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11533 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
11534 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
11535 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm14[0],ymm15[0],ymm14[2],ymm15[2]
11536 ; AVX2-NEXT: vpbroadcastd 204(%rdi), %ymm6
11537 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm6[7]
11538 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11539 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11540 ; AVX2-NEXT: vmovdqa 528(%rdi), %xmm0
11541 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11542 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm8[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
11543 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
11544 ; AVX2-NEXT: vpbroadcastd 456(%rdi), %xmm1
11545 ; AVX2-NEXT: vmovdqa 480(%rdi), %xmm4
11546 ; AVX2-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
11547 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
11548 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
11549 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11550 ; AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11551 ; AVX2-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
11552 ; AVX2-NEXT: vpbroadcastd 652(%rdi), %ymm15
11553 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm15[7]
11554 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11555 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11556 ; AVX2-NEXT: vmovdqa 976(%rdi), %xmm0
11557 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11558 ; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm13[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
11559 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
11560 ; AVX2-NEXT: vpbroadcastd 904(%rdi), %xmm15
11561 ; AVX2-NEXT: vmovdqa 928(%rdi), %xmm11
11562 ; AVX2-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm11[1],xmm15[2,3]
11563 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3,4,5,6,7]
11564 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
11565 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
11566 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm10[0],ymm7[0],ymm10[2],ymm7[2]
11567 ; AVX2-NEXT: vpbroadcastd 1100(%rdi), %ymm14
11568 ; AVX2-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5,6],ymm14[7]
11569 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5,6,7]
11570 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11571 ; AVX2-NEXT: vmovdqa 1424(%rdi), %xmm0
11572 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
11573 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
11574 ; AVX2-NEXT: vpalignr {{.*#+}} ymm14 = ymm9[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
11575 ; AVX2-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6,7]
11576 ; AVX2-NEXT: vpbroadcastd 1352(%rdi), %xmm15
11577 ; AVX2-NEXT: vmovdqa 1376(%rdi), %xmm0
11578 ; AVX2-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
11579 ; AVX2-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
11580 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
11581 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11582 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm6[0],ymm1[0],ymm6[2],ymm1[2]
11583 ; AVX2-NEXT: vpbroadcastd 1548(%rdi), %ymm13
11584 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5,6],ymm13[7]
11585 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
11586 ; AVX2-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11587 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11588 ; AVX2-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm13 # 32-byte Folded Reload
11589 ; AVX2-NEXT: # ymm13 = ymm4[0],mem[1],ymm4[2,3,4,5,6,7]
11590 ; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],mem[3]
11591 ; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,2,2,3]
11592 ; AVX2-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,1,1,0,7,5,5,4]
11593 ; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm13[2,3,4,5,6,7]
11594 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11595 ; AVX2-NEXT: vshufps {{.*#+}} ymm13 = ymm4[0,2],ymm12[1,3],ymm4[4,6],ymm12[5,7]
11596 ; AVX2-NEXT: vmovaps %ymm4, %ymm12
11597 ; AVX2-NEXT: vbroadcastss 432(%rdi), %ymm14
11598 ; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm14[7]
11599 ; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm13[5,6,7]
11600 ; AVX2-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11601 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11602 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
11603 ; AVX2-NEXT: # ymm5 = ymm4[0],mem[1],ymm4[2,3,4,5,6,7]
11604 ; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
11605 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1,2],mem[3]
11606 ; AVX2-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,2,2,3]
11607 ; AVX2-NEXT: vshufps {{.*#+}} ymm5 = ymm5[3,1,1,0,7,5,5,4]
11608 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3,4,5,6,7]
11609 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
11610 ; AVX2-NEXT: vshufps {{.*#+}} ymm5 = ymm14[0,2],ymm3[1,3],ymm14[4,6],ymm3[5,7]
11611 ; AVX2-NEXT: vbroadcastss 880(%rdi), %ymm13
11612 ; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm13[7]
11613 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm5[5,6,7]
11614 ; AVX2-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11615 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11616 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
11617 ; AVX2-NEXT: # ymm4 = ymm3[0],mem[1],ymm3[2,3,4,5,6,7]
11618 ; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
11619 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1,2],mem[3]
11620 ; AVX2-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,2,2,3]
11621 ; AVX2-NEXT: vshufps {{.*#+}} ymm4 = ymm4[3,1,1,0,7,5,5,4]
11622 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
11623 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11624 ; AVX2-NEXT: vshufps $216, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
11625 ; AVX2-NEXT: # ymm4 = ymm4[0,2],mem[1,3],ymm4[4,6],mem[5,7]
11626 ; AVX2-NEXT: vbroadcastss 1328(%rdi), %ymm5
11627 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
11628 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6,7]
11629 ; AVX2-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11630 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11631 ; AVX2-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
11632 ; AVX2-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4,5,6,7]
11633 ; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],mem[3]
11634 ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,2,2,3]
11635 ; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[3,1,1,0,7,5,5,4]
11636 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
11637 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
11638 ; AVX2-NEXT: vshufps $216, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm3 # 32-byte Folded Reload
11639 ; AVX2-NEXT: # ymm3 = ymm13[0,2],mem[1,3],ymm13[4,6],mem[5,7]
11640 ; AVX2-NEXT: vbroadcastss 1776(%rdi), %ymm4
11641 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
11642 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
11643 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11644 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm9[0],ymm8[1],ymm9[2,3,4,5,6,7]
11645 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],mem[3]
11646 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
11647 ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[3,1,1,0,7,5,5,4]
11648 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
11649 ; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,2],ymm1[1,3],ymm6[4,6],ymm1[5,7]
11650 ; AVX2-NEXT: vmovaps %ymm1, %ymm9
11651 ; AVX2-NEXT: vbroadcastss 1552(%rdi), %ymm3
11652 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
11653 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
11654 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11655 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11656 ; AVX2-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11657 ; AVX2-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
11658 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm11[0,1,2],mem[3]
11659 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
11660 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
11661 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
11662 ; AVX2-NEXT: vmovdqa %ymm10, %ymm8
11663 ; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,2],ymm7[1,3],ymm10[4,6],ymm7[5,7]
11664 ; AVX2-NEXT: vmovaps %ymm7, %ymm11
11665 ; AVX2-NEXT: vbroadcastss 1104(%rdi), %ymm2
11666 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
11667 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11668 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11669 ; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
11670 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11671 ; AVX2-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
11672 ; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11673 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],mem[3]
11674 ; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,2,2,3]
11675 ; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
11676 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
11677 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11678 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
11679 ; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,2],ymm7[1,3],ymm4[4,6],ymm7[5,7]
11680 ; AVX2-NEXT: vbroadcastss 656(%rdi), %ymm2
11681 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
11682 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11683 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11684 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11685 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11686 ; AVX2-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
11687 ; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11688 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],mem[3]
11689 ; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,2,2,3]
11690 ; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
11691 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
11692 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11693 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
11694 ; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,2],ymm10[1,3],ymm3[4,6],ymm10[5,7]
11695 ; AVX2-NEXT: vbroadcastss 208(%rdi), %ymm2
11696 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
11697 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
11698 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11699 ; AVX2-NEXT: vbroadcastss 100(%rdi), %xmm0
11700 ; AVX2-NEXT: vmovaps 64(%rdi), %xmm6
11701 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm6[0,1,2],xmm0[3]
11702 ; AVX2-NEXT: vmovsd {{.*#+}} xmm5 = [4,3,0,0]
11703 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11704 ; AVX2-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11705 ; AVX2-NEXT: # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
11706 ; AVX2-NEXT: vpermps %ymm1, %ymm5, %ymm1
11707 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
11708 ; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm15 = [0,7,0,7,0,7,0,7]
11709 ; AVX2-NEXT: vpermps %ymm3, %ymm15, %ymm1
11710 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm10[6,7]
11711 ; AVX2-NEXT: vbroadcastss 212(%rdi), %ymm2
11712 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
11713 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
11714 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11715 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11716 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11717 ; AVX2-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
11718 ; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm0
11719 ; AVX2-NEXT: vbroadcastss 324(%rdi), %xmm2
11720 ; AVX2-NEXT: vmovaps 288(%rdi), %xmm1
11721 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0,1,2],xmm2[3]
11722 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
11723 ; AVX2-NEXT: vpermps %ymm12, %ymm15, %ymm2
11724 ; AVX2-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
11725 ; AVX2-NEXT: # ymm2 = ymm2[0,1,2,3,4,5],mem[6,7]
11726 ; AVX2-NEXT: vbroadcastss 436(%rdi), %ymm3
11727 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
11728 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
11729 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11730 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11731 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11732 ; AVX2-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
11733 ; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm0
11734 ; AVX2-NEXT: vbroadcastss 548(%rdi), %xmm3
11735 ; AVX2-NEXT: vmovaps 512(%rdi), %xmm2
11736 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm2[0,1,2],xmm3[3]
11737 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3]
11738 ; AVX2-NEXT: vpermps %ymm4, %ymm15, %ymm3
11739 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm7[6,7]
11740 ; AVX2-NEXT: vbroadcastss 660(%rdi), %ymm4
11741 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
11742 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
11743 ; AVX2-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
11744 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11745 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11746 ; AVX2-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
11747 ; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm0
11748 ; AVX2-NEXT: vbroadcastss 772(%rdi), %xmm4
11749 ; AVX2-NEXT: vmovaps 736(%rdi), %xmm3
11750 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm3[0,1,2],xmm4[3]
11751 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
11752 ; AVX2-NEXT: vpermps %ymm14, %ymm15, %ymm4
11753 ; AVX2-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
11754 ; AVX2-NEXT: # ymm4 = ymm4[0,1,2,3,4,5],mem[6,7]
11755 ; AVX2-NEXT: vbroadcastss 884(%rdi), %ymm7
11756 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm7[7]
11757 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
11758 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11759 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11760 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11761 ; AVX2-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
11762 ; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm0
11763 ; AVX2-NEXT: vbroadcastss 996(%rdi), %xmm7
11764 ; AVX2-NEXT: vmovaps 960(%rdi), %xmm4
11765 ; AVX2-NEXT: vblendps {{.*#+}} xmm7 = xmm4[0,1,2],xmm7[3]
11766 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
11767 ; AVX2-NEXT: vpermps %ymm8, %ymm15, %ymm7
11768 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm11[6,7]
11769 ; AVX2-NEXT: vbroadcastss 1108(%rdi), %ymm8
11770 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
11771 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
11772 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11773 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11774 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11775 ; AVX2-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
11776 ; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm0
11777 ; AVX2-NEXT: vbroadcastss 1220(%rdi), %xmm7
11778 ; AVX2-NEXT: vmovaps 1184(%rdi), %xmm14
11779 ; AVX2-NEXT: vblendps {{.*#+}} xmm7 = xmm14[0,1,2],xmm7[3]
11780 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
11781 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
11782 ; AVX2-NEXT: vpermps %ymm10, %ymm15, %ymm7
11783 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
11784 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm12[6,7]
11785 ; AVX2-NEXT: vbroadcastss 1332(%rdi), %ymm8
11786 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
11787 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
11788 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11789 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11790 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11791 ; AVX2-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
11792 ; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm7
11793 ; AVX2-NEXT: vbroadcastss 1444(%rdi), %xmm8
11794 ; AVX2-NEXT: vmovaps 1408(%rdi), %xmm0
11795 ; AVX2-NEXT: vblendps {{.*#+}} xmm8 = xmm0[0,1,2],xmm8[3]
11796 ; AVX2-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3]
11797 ; AVX2-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm8 # 32-byte Folded Reload
11798 ; AVX2-NEXT: vmovaps %ymm9, %ymm11
11799 ; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
11800 ; AVX2-NEXT: vbroadcastss 1556(%rdi), %ymm9
11801 ; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm9[7]
11802 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
11803 ; AVX2-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11804 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
11805 ; AVX2-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
11806 ; AVX2-NEXT: # ymm7 = mem[0,1,2,3],ymm7[4,5,6,7]
11807 ; AVX2-NEXT: vpermps %ymm7, %ymm5, %ymm7
11808 ; AVX2-NEXT: vbroadcastss 1668(%rdi), %xmm8
11809 ; AVX2-NEXT: vmovaps 1632(%rdi), %xmm5
11810 ; AVX2-NEXT: vblendps {{.*#+}} xmm8 = xmm5[0,1,2],xmm8[3]
11811 ; AVX2-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3]
11812 ; AVX2-NEXT: vpermps %ymm13, %ymm15, %ymm8
11813 ; AVX2-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
11814 ; AVX2-NEXT: # ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
11815 ; AVX2-NEXT: vbroadcastss 1780(%rdi), %ymm9
11816 ; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm9[7]
11817 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
11818 ; AVX2-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11819 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
11820 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
11821 ; AVX2-NEXT: # ymm7 = ymm7[0],mem[1],ymm7[2,3,4],mem[5],ymm7[6,7]
11822 ; AVX2-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,3,3,5,4,7,7]
11823 ; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
11824 ; AVX2-NEXT: vbroadcastss 216(%rdi), %ymm8
11825 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
11826 ; AVX2-NEXT: vmovaps 96(%rdi), %xmm9
11827 ; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1,2],xmm6[3]
11828 ; AVX2-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,3,2]
11829 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
11830 ; AVX2-NEXT: # ymm8 = mem[1,0,2,3,5,4,6,7]
11831 ; AVX2-NEXT: vextractf128 $1, %ymm8, %xmm8
11832 ; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
11833 ; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
11834 ; AVX2-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11835 ; AVX2-NEXT: vmovaps 320(%rdi), %xmm13
11836 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1,2],xmm1[3]
11837 ; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,3,2]
11838 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
11839 ; AVX2-NEXT: # ymm6 = mem[1,0,2,3,5,4,6,7]
11840 ; AVX2-NEXT: vextractf128 $1, %ymm6, %xmm6
11841 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3]
11842 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
11843 ; AVX2-NEXT: vblendps $221, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
11844 ; AVX2-NEXT: # ymm6 = mem[0],ymm6[1],mem[2,3,4],ymm6[5],mem[6,7]
11845 ; AVX2-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,0,3,3,5,4,7,7]
11846 ; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
11847 ; AVX2-NEXT: vbroadcastss 440(%rdi), %ymm7
11848 ; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
11849 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
11850 ; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11851 ; AVX2-NEXT: vmovaps 544(%rdi), %xmm8
11852 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm2[3]
11853 ; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,3,2]
11854 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11855 ; AVX2-NEXT: # ymm2 = mem[1,0,2,3,5,4,6,7]
11856 ; AVX2-NEXT: vextractf128 $1, %ymm2, %xmm2
11857 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
11858 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
11859 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
11860 ; AVX2-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4],mem[5],ymm2[6,7]
11861 ; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,0,3,3,5,4,7,7]
11862 ; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
11863 ; AVX2-NEXT: vbroadcastss 664(%rdi), %ymm6
11864 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm6[7]
11865 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
11866 ; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11867 ; AVX2-NEXT: vmovaps 768(%rdi), %xmm1
11868 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0,1,2],xmm3[3]
11869 ; AVX2-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,3,2]
11870 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
11871 ; AVX2-NEXT: # ymm3 = mem[1,0,2,3,5,4,6,7]
11872 ; AVX2-NEXT: vextractf128 $1, %ymm3, %xmm3
11873 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
11874 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11875 ; AVX2-NEXT: vblendps $221, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
11876 ; AVX2-NEXT: # ymm3 = mem[0],ymm3[1],mem[2,3,4],ymm3[5],mem[6,7]
11877 ; AVX2-NEXT: vshufps {{.*#+}} ymm3 = ymm3[1,0,3,3,5,4,7,7]
11878 ; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
11879 ; AVX2-NEXT: vbroadcastss 888(%rdi), %ymm6
11880 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm6[7]
11881 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
11882 ; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11883 ; AVX2-NEXT: vmovaps 992(%rdi), %xmm2
11884 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm2[0,1,2],xmm4[3]
11885 ; AVX2-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,3,2]
11886 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
11887 ; AVX2-NEXT: # ymm4 = mem[1,0,2,3,5,4,6,7]
11888 ; AVX2-NEXT: vextractf128 $1, %ymm4, %xmm4
11889 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
11890 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11891 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
11892 ; AVX2-NEXT: # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
11893 ; AVX2-NEXT: vshufps {{.*#+}} ymm4 = ymm4[1,0,3,3,5,4,7,7]
11894 ; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
11895 ; AVX2-NEXT: vbroadcastss 1112(%rdi), %ymm6
11896 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm6[7]
11897 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm4[4,5,6,7]
11898 ; AVX2-NEXT: vmovaps 1216(%rdi), %xmm3
11899 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm3[0,1,2],xmm14[3]
11900 ; AVX2-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,3,2]
11901 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
11902 ; AVX2-NEXT: # ymm6 = mem[1,0,2,3,5,4,6,7]
11903 ; AVX2-NEXT: vextractf128 $1, %ymm6, %xmm6
11904 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
11905 ; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm12[0],ymm10[1],ymm12[2,3,4],ymm10[5],ymm12[6,7]
11906 ; AVX2-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,0,3,3,5,4,7,7]
11907 ; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
11908 ; AVX2-NEXT: vbroadcastss 1336(%rdi), %ymm10
11909 ; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm10[7]
11910 ; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm4[0,1,2,3],ymm6[4,5,6,7]
11911 ; AVX2-NEXT: vmovaps 1440(%rdi), %xmm4
11912 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[3]
11913 ; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,3,2]
11914 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
11915 ; AVX2-NEXT: # ymm10 = mem[1,0,2,3,5,4,6,7]
11916 ; AVX2-NEXT: vextractf128 $1, %ymm10, %xmm10
11917 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm10[0,1],xmm0[2,3]
11918 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
11919 ; AVX2-NEXT: # ymm10 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
11920 ; AVX2-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,0,3,3,5,4,7,7]
11921 ; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
11922 ; AVX2-NEXT: vbroadcastss 1560(%rdi), %ymm12
11923 ; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,6],ymm12[7]
11924 ; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm10[4,5,6,7]
11925 ; AVX2-NEXT: vmovaps 1664(%rdi), %xmm14
11926 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm14[0,1,2],xmm5[3]
11927 ; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,3,2]
11928 ; AVX2-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
11929 ; AVX2-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
11930 ; AVX2-NEXT: vextractf128 $1, %ymm5, %xmm5
11931 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm5[0,1],xmm0[2,3]
11932 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11933 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
11934 ; AVX2-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
11935 ; AVX2-NEXT: vshufps {{.*#+}} ymm5 = ymm5[1,0,3,3,5,4,7,7]
11936 ; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
11937 ; AVX2-NEXT: vbroadcastss 1784(%rdi), %ymm12
11938 ; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm12[7]
11939 ; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm5[4,5,6,7]
11940 ; AVX2-NEXT: vbroadcastss 136(%rdi), %xmm0
11941 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
11942 ; AVX2-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
11943 ; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
11944 ; AVX2-NEXT: vpermps 192(%rdi), %ymm15, %ymm5
11945 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
11946 ; AVX2-NEXT: vbroadcastss 80(%rdi), %ymm5
11947 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm9[3]
11948 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
11949 ; AVX2-NEXT: # ymm11 = mem[2,3,2,3,6,7,6,7]
11950 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
11951 ; AVX2-NEXT: # ymm11 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
11952 ; AVX2-NEXT: vextractf128 $1, %ymm11, %xmm11
11953 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm11[0,1],xmm5[2,3]
11954 ; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm5[0,1,2,3],ymm0[4,5,6,7]
11955 ; AVX2-NEXT: vbroadcastss 360(%rdi), %xmm0
11956 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
11957 ; AVX2-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
11958 ; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
11959 ; AVX2-NEXT: vpermps 416(%rdi), %ymm15, %ymm5
11960 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
11961 ; AVX2-NEXT: vbroadcastss 304(%rdi), %ymm5
11962 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm13[3]
11963 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
11964 ; AVX2-NEXT: # ymm13 = mem[2,3,2,3,6,7,6,7]
11965 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
11966 ; AVX2-NEXT: # ymm13 = ymm13[0],mem[1],ymm13[2,3,4],mem[5],ymm13[6,7]
11967 ; AVX2-NEXT: vextractf128 $1, %ymm13, %xmm13
11968 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm13[0,1],xmm5[2,3]
11969 ; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm5[0,1,2,3],ymm0[4,5,6,7]
11970 ; AVX2-NEXT: vbroadcastss 584(%rdi), %xmm0
11971 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
11972 ; AVX2-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
11973 ; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
11974 ; AVX2-NEXT: vpermps 640(%rdi), %ymm15, %ymm5
11975 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
11976 ; AVX2-NEXT: vbroadcastss 528(%rdi), %ymm5
11977 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3]
11978 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
11979 ; AVX2-NEXT: # ymm8 = mem[2,3,2,3,6,7,6,7]
11980 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
11981 ; AVX2-NEXT: # ymm8 = ymm8[0],mem[1],ymm8[2,3,4],mem[5],ymm8[6,7]
11982 ; AVX2-NEXT: vextractf128 $1, %ymm8, %xmm8
11983 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm8[0,1],xmm5[2,3]
11984 ; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm0[4,5,6,7]
11985 ; AVX2-NEXT: vbroadcastss 808(%rdi), %xmm0
11986 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
11987 ; AVX2-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
11988 ; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
11989 ; AVX2-NEXT: vpermps 864(%rdi), %ymm15, %ymm5
11990 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
11991 ; AVX2-NEXT: vbroadcastss 752(%rdi), %ymm5
11992 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[3]
11993 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
11994 ; AVX2-NEXT: # ymm5 = mem[2,3,2,3,6,7,6,7]
11995 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
11996 ; AVX2-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
11997 ; AVX2-NEXT: vextractf128 $1, %ymm5, %xmm5
11998 ; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
11999 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
12000 ; AVX2-NEXT: vbroadcastss 1032(%rdi), %xmm1
12001 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
12002 ; AVX2-NEXT: # xmm1 = xmm1[0],mem[1],xmm1[2,3]
12003 ; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
12004 ; AVX2-NEXT: vpermps 1088(%rdi), %ymm15, %ymm5
12005 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm5[6,7]
12006 ; AVX2-NEXT: vbroadcastss 976(%rdi), %ymm5
12007 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3]
12008 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
12009 ; AVX2-NEXT: # ymm5 = mem[2,3,2,3,6,7,6,7]
12010 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
12011 ; AVX2-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
12012 ; AVX2-NEXT: vextractf128 $1, %ymm5, %xmm5
12013 ; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
12014 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
12015 ; AVX2-NEXT: vbroadcastss 1256(%rdi), %xmm2
12016 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
12017 ; AVX2-NEXT: # xmm2 = xmm2[0],mem[1],xmm2[2,3]
12018 ; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
12019 ; AVX2-NEXT: vpermps 1312(%rdi), %ymm15, %ymm5
12020 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm5[6,7]
12021 ; AVX2-NEXT: vbroadcastss 1200(%rdi), %ymm5
12022 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[3]
12023 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
12024 ; AVX2-NEXT: # ymm5 = mem[2,3,2,3,6,7,6,7]
12025 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
12026 ; AVX2-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
12027 ; AVX2-NEXT: vextractf128 $1, %ymm5, %xmm5
12028 ; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
12029 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
12030 ; AVX2-NEXT: vbroadcastss 1480(%rdi), %xmm3
12031 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
12032 ; AVX2-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
12033 ; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
12034 ; AVX2-NEXT: vpermps 1536(%rdi), %ymm15, %ymm5
12035 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
12036 ; AVX2-NEXT: vbroadcastss 1424(%rdi), %ymm5
12037 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3]
12038 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
12039 ; AVX2-NEXT: # ymm5 = mem[2,3,2,3,6,7,6,7]
12040 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
12041 ; AVX2-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
12042 ; AVX2-NEXT: vextractf128 $1, %ymm5, %xmm5
12043 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
12044 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
12045 ; AVX2-NEXT: vbroadcastss 1704(%rdi), %xmm4
12046 ; AVX2-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
12047 ; AVX2-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
12048 ; AVX2-NEXT: vpermps 1760(%rdi), %ymm15, %ymm5
12049 ; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
12050 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
12051 ; AVX2-NEXT: vbroadcastss 1648(%rdi), %ymm5
12052 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm14[3]
12053 ; AVX2-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
12054 ; AVX2-NEXT: # ymm14 = mem[2,3,2,3,6,7,6,7]
12055 ; AVX2-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
12056 ; AVX2-NEXT: # ymm14 = ymm14[0],mem[1],ymm14[2,3,4],mem[5],ymm14[6,7]
12057 ; AVX2-NEXT: vextractf128 $1, %ymm14, %xmm14
12058 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm14[0,1],xmm5[2,3]
12059 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
12060 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12061 ; AVX2-NEXT: vmovaps %ymm5, 192(%rsi)
12062 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12063 ; AVX2-NEXT: vmovaps %ymm5, 128(%rsi)
12064 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12065 ; AVX2-NEXT: vmovaps %ymm5, 64(%rsi)
12066 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12067 ; AVX2-NEXT: vmovaps %ymm5, (%rsi)
12068 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12069 ; AVX2-NEXT: vmovaps %ymm5, 224(%rsi)
12070 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12071 ; AVX2-NEXT: vmovaps %ymm5, 160(%rsi)
12072 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12073 ; AVX2-NEXT: vmovaps %ymm5, 96(%rsi)
12074 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12075 ; AVX2-NEXT: vmovaps %ymm5, 32(%rsi)
12076 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12077 ; AVX2-NEXT: vmovaps %ymm5, 192(%rdx)
12078 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12079 ; AVX2-NEXT: vmovaps %ymm5, 128(%rdx)
12080 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12081 ; AVX2-NEXT: vmovaps %ymm5, 64(%rdx)
12082 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12083 ; AVX2-NEXT: vmovaps %ymm5, (%rdx)
12084 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12085 ; AVX2-NEXT: vmovaps %ymm5, 224(%rdx)
12086 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12087 ; AVX2-NEXT: vmovaps %ymm5, 160(%rdx)
12088 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12089 ; AVX2-NEXT: vmovaps %ymm5, 96(%rdx)
12090 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12091 ; AVX2-NEXT: vmovaps %ymm5, 32(%rdx)
12092 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12093 ; AVX2-NEXT: vmovaps %ymm5, 192(%rcx)
12094 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12095 ; AVX2-NEXT: vmovaps %ymm5, 128(%rcx)
12096 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12097 ; AVX2-NEXT: vmovaps %ymm5, 64(%rcx)
12098 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12099 ; AVX2-NEXT: vmovaps %ymm5, (%rcx)
12100 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12101 ; AVX2-NEXT: vmovaps %ymm5, 224(%rcx)
12102 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12103 ; AVX2-NEXT: vmovaps %ymm5, 160(%rcx)
12104 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12105 ; AVX2-NEXT: vmovaps %ymm5, 96(%rcx)
12106 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12107 ; AVX2-NEXT: vmovaps %ymm5, 32(%rcx)
12108 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12109 ; AVX2-NEXT: vmovaps %ymm5, (%r8)
12110 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12111 ; AVX2-NEXT: vmovaps %ymm5, 64(%r8)
12112 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12113 ; AVX2-NEXT: vmovaps %ymm5, 128(%r8)
12114 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12115 ; AVX2-NEXT: vmovaps %ymm5, 192(%r8)
12116 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12117 ; AVX2-NEXT: vmovaps %ymm5, 224(%r8)
12118 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12119 ; AVX2-NEXT: vmovaps %ymm5, 160(%r8)
12120 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12121 ; AVX2-NEXT: vmovaps %ymm5, 96(%r8)
12122 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12123 ; AVX2-NEXT: vmovaps %ymm5, 32(%r8)
12124 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12125 ; AVX2-NEXT: vmovaps %ymm5, 224(%r9)
12126 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12127 ; AVX2-NEXT: vmovaps %ymm5, 192(%r9)
12128 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12129 ; AVX2-NEXT: vmovaps %ymm5, 160(%r9)
12130 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12131 ; AVX2-NEXT: vmovaps %ymm5, 128(%r9)
12132 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12133 ; AVX2-NEXT: vmovaps %ymm5, 96(%r9)
12134 ; AVX2-NEXT: vmovups (%rsp), %ymm5 # 32-byte Reload
12135 ; AVX2-NEXT: vmovaps %ymm5, 64(%r9)
12136 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12137 ; AVX2-NEXT: vmovaps %ymm5, 32(%r9)
12138 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12139 ; AVX2-NEXT: vmovaps %ymm5, (%r9)
12140 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
12141 ; AVX2-NEXT: vmovaps %ymm12, 224(%rax)
12142 ; AVX2-NEXT: vmovaps %ymm10, 192(%rax)
12143 ; AVX2-NEXT: vmovaps %ymm6, 160(%rax)
12144 ; AVX2-NEXT: vmovaps %ymm7, 128(%rax)
12145 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12146 ; AVX2-NEXT: vmovaps %ymm5, 96(%rax)
12147 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12148 ; AVX2-NEXT: vmovaps %ymm5, 64(%rax)
12149 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12150 ; AVX2-NEXT: vmovaps %ymm5, 32(%rax)
12151 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
12152 ; AVX2-NEXT: vmovaps %ymm5, (%rax)
12153 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
12154 ; AVX2-NEXT: vmovaps %ymm4, 224(%rax)
12155 ; AVX2-NEXT: vmovaps %ymm3, 192(%rax)
12156 ; AVX2-NEXT: vmovaps %ymm2, 160(%rax)
12157 ; AVX2-NEXT: vmovaps %ymm1, 128(%rax)
12158 ; AVX2-NEXT: vmovaps %ymm0, 96(%rax)
12159 ; AVX2-NEXT: vmovaps %ymm8, 64(%rax)
12160 ; AVX2-NEXT: vmovaps %ymm13, 32(%rax)
12161 ; AVX2-NEXT: vmovaps %ymm11, (%rax)
12162 ; AVX2-NEXT: addq $2648, %rsp # imm = 0xA58
12163 ; AVX2-NEXT: vzeroupper
12164 ; AVX2-NEXT: retq
12165 ;
12166 ; AVX2-FP-LABEL: load_i32_stride7_vf64:
12167 ; AVX2-FP: # %bb.0:
12168 ; AVX2-FP-NEXT: subq $2648, %rsp # imm = 0xA58
12169 ; AVX2-FP-NEXT: vmovdqa 1216(%rdi), %ymm9
12170 ; AVX2-FP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12171 ; AVX2-FP-NEXT: vmovdqa 1152(%rdi), %ymm4
12172 ; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12173 ; AVX2-FP-NEXT: vmovdqa 1120(%rdi), %ymm5
12174 ; AVX2-FP-NEXT: vmovdqa 768(%rdi), %ymm12
12175 ; AVX2-FP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12176 ; AVX2-FP-NEXT: vmovdqa 704(%rdi), %ymm6
12177 ; AVX2-FP-NEXT: vmovdqa 672(%rdi), %ymm7
12178 ; AVX2-FP-NEXT: vmovdqa 320(%rdi), %ymm8
12179 ; AVX2-FP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12180 ; AVX2-FP-NEXT: vmovdqa 256(%rdi), %ymm10
12181 ; AVX2-FP-NEXT: vmovdqa 224(%rdi), %ymm11
12182 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,7,6,0]
12183 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5],ymm10[6],ymm11[7]
12184 ; AVX2-FP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12185 ; AVX2-FP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12186 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm1
12187 ; AVX2-FP-NEXT: vpbroadcastq 304(%rdi), %ymm2
12188 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7]
12189 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
12190 ; AVX2-FP-NEXT: vmovdqa 352(%rdi), %xmm2
12191 ; AVX2-FP-NEXT: vmovdqa 384(%rdi), %xmm3
12192 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12193 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
12194 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
12195 ; AVX2-FP-NEXT: vpbroadcastd 420(%rdi), %ymm3
12196 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
12197 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
12198 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12199 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5],ymm6[6],ymm7[7]
12200 ; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12201 ; AVX2-FP-NEXT: vmovdqa %ymm6, %ymm8
12202 ; AVX2-FP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12203 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm1
12204 ; AVX2-FP-NEXT: vpbroadcastq 752(%rdi), %ymm2
12205 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm12[4,5,6,7]
12206 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
12207 ; AVX2-FP-NEXT: vmovdqa 800(%rdi), %xmm2
12208 ; AVX2-FP-NEXT: vmovdqa 832(%rdi), %xmm3
12209 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12210 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
12211 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
12212 ; AVX2-FP-NEXT: vpbroadcastd 868(%rdi), %ymm3
12213 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
12214 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
12215 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12216 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm4[6],ymm5[7]
12217 ; AVX2-FP-NEXT: vmovdqa %ymm5, %ymm6
12218 ; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12219 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm1
12220 ; AVX2-FP-NEXT: vpbroadcastq 1200(%rdi), %ymm2
12221 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
12222 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
12223 ; AVX2-FP-NEXT: vmovdqa 1248(%rdi), %xmm2
12224 ; AVX2-FP-NEXT: vmovdqa 1280(%rdi), %xmm3
12225 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12226 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
12227 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
12228 ; AVX2-FP-NEXT: vpbroadcastd 1316(%rdi), %ymm3
12229 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
12230 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
12231 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12232 ; AVX2-FP-NEXT: vmovdqa 1600(%rdi), %ymm13
12233 ; AVX2-FP-NEXT: vmovdqa 1568(%rdi), %ymm5
12234 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm13[6],ymm5[7]
12235 ; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12236 ; AVX2-FP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12237 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm1
12238 ; AVX2-FP-NEXT: vmovdqa 1664(%rdi), %ymm3
12239 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12240 ; AVX2-FP-NEXT: vpbroadcastq 1648(%rdi), %ymm2
12241 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
12242 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
12243 ; AVX2-FP-NEXT: vmovdqa 1696(%rdi), %xmm2
12244 ; AVX2-FP-NEXT: vmovdqa 1728(%rdi), %xmm3
12245 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12246 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
12247 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
12248 ; AVX2-FP-NEXT: vpbroadcastd 1764(%rdi), %ymm3
12249 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
12250 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
12251 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12252 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm2
12253 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12254 ; AVX2-FP-NEXT: vpbroadcastq 80(%rdi), %ymm1
12255 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
12256 ; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm2
12257 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12258 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm3
12259 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12260 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6],ymm2[7]
12261 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
12262 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
12263 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %xmm2
12264 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %xmm3
12265 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12266 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
12267 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
12268 ; AVX2-FP-NEXT: vpbroadcastd 196(%rdi), %ymm3
12269 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
12270 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
12271 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12272 ; AVX2-FP-NEXT: vmovdqa 480(%rdi), %ymm2
12273 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12274 ; AVX2-FP-NEXT: vmovdqa 448(%rdi), %ymm1
12275 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12276 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
12277 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm1
12278 ; AVX2-FP-NEXT: vmovdqa 544(%rdi), %ymm3
12279 ; AVX2-FP-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill
12280 ; AVX2-FP-NEXT: vpbroadcastq 528(%rdi), %ymm2
12281 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
12282 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
12283 ; AVX2-FP-NEXT: vmovdqa 576(%rdi), %xmm2
12284 ; AVX2-FP-NEXT: vmovdqa 608(%rdi), %xmm3
12285 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12286 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
12287 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
12288 ; AVX2-FP-NEXT: vpbroadcastd 644(%rdi), %ymm3
12289 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
12290 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
12291 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12292 ; AVX2-FP-NEXT: vmovdqa 928(%rdi), %ymm2
12293 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12294 ; AVX2-FP-NEXT: vmovdqa 896(%rdi), %ymm1
12295 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12296 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
12297 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm1
12298 ; AVX2-FP-NEXT: vmovdqa 992(%rdi), %ymm3
12299 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12300 ; AVX2-FP-NEXT: vpbroadcastq 976(%rdi), %ymm2
12301 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
12302 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
12303 ; AVX2-FP-NEXT: vmovdqa 1024(%rdi), %xmm2
12304 ; AVX2-FP-NEXT: vmovdqa 1056(%rdi), %xmm3
12305 ; AVX2-FP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12306 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
12307 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
12308 ; AVX2-FP-NEXT: vpbroadcastd 1092(%rdi), %ymm3
12309 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
12310 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
12311 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12312 ; AVX2-FP-NEXT: vmovdqa 1376(%rdi), %ymm14
12313 ; AVX2-FP-NEXT: vmovdqa 1344(%rdi), %ymm15
12314 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3,4,5],ymm14[6],ymm15[7]
12315 ; AVX2-FP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12316 ; AVX2-FP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12317 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm0, %ymm0
12318 ; AVX2-FP-NEXT: vmovdqa 1440(%rdi), %ymm4
12319 ; AVX2-FP-NEXT: vpbroadcastq 1424(%rdi), %ymm1
12320 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
12321 ; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12322 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
12323 ; AVX2-FP-NEXT: vmovdqa 1472(%rdi), %xmm1
12324 ; AVX2-FP-NEXT: vmovdqa 1504(%rdi), %xmm2
12325 ; AVX2-FP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12326 ; AVX2-FP-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
12327 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
12328 ; AVX2-FP-NEXT: vpbroadcastd 1540(%rdi), %ymm2
12329 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
12330 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12331 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12332 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,2,2,2]
12333 ; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
12334 ; AVX2-FP-NEXT: vmovdqa 384(%rdi), %ymm1
12335 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12336 ; AVX2-FP-NEXT: vmovdqa 352(%rdi), %ymm0
12337 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12338 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11],ymm1[28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27]
12339 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,0]
12340 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
12341 ; AVX2-FP-NEXT: vmovdqa 288(%rdi), %ymm12
12342 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
12343 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm12[2,3],ymm9[4,5],ymm12[6,7]
12344 ; AVX2-FP-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12345 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0],ymm11[1],ymm10[2,3,4],ymm11[5],ymm10[6,7]
12346 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12347 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
12348 ; AVX2-FP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,0,7,6,5,6,5,6]
12349 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
12350 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
12351 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12352 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
12353 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
12354 ; AVX2-FP-NEXT: vmovdqa 832(%rdi), %ymm3
12355 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12356 ; AVX2-FP-NEXT: vmovdqa 800(%rdi), %ymm2
12357 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12358 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
12359 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
12360 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
12361 ; AVX2-FP-NEXT: vmovdqa 736(%rdi), %ymm2
12362 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12363 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
12364 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm2[2,3],ymm11[4,5],ymm2[6,7]
12365 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0],ymm7[1],ymm8[2,3,4],ymm7[5],ymm8[6,7]
12366 ; AVX2-FP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12367 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3,4],ymm2[5,6],ymm8[7]
12368 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
12369 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
12370 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12371 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
12372 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
12373 ; AVX2-FP-NEXT: vmovdqa 1280(%rdi), %ymm3
12374 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12375 ; AVX2-FP-NEXT: vmovdqa 1248(%rdi), %ymm2
12376 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12377 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
12378 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
12379 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
12380 ; AVX2-FP-NEXT: vmovdqa 1184(%rdi), %ymm2
12381 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12382 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
12383 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0,1],ymm2[2,3],ymm10[4,5],ymm2[6,7]
12384 ; AVX2-FP-NEXT: vpblendd $221, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
12385 ; AVX2-FP-NEXT: # ymm7 = mem[0],ymm6[1],mem[2,3,4],ymm6[5],mem[6,7]
12386 ; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12387 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4],ymm2[5,6],ymm7[7]
12388 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
12389 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
12390 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12391 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
12392 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
12393 ; AVX2-FP-NEXT: vmovdqa 1728(%rdi), %ymm3
12394 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12395 ; AVX2-FP-NEXT: vmovdqa 1696(%rdi), %ymm2
12396 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12397 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
12398 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
12399 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
12400 ; AVX2-FP-NEXT: vmovdqa 1632(%rdi), %ymm2
12401 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12402 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
12403 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
12404 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm13[0],ymm5[1],ymm13[2,3,4],ymm5[5],ymm13[6,7]
12405 ; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12406 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5,6],ymm5[7]
12407 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
12408 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
12409 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12410 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
12411 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
12412 ; AVX2-FP-NEXT: vmovdqa 608(%rdi), %ymm3
12413 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12414 ; AVX2-FP-NEXT: vmovdqa 576(%rdi), %ymm2
12415 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12416 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
12417 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
12418 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
12419 ; AVX2-FP-NEXT: vmovdqa 512(%rdi), %ymm2
12420 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12421 ; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm8 # 32-byte Reload
12422 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3],ymm8[4,5],ymm2[6,7]
12423 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
12424 ; AVX2-FP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
12425 ; AVX2-FP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
12426 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12427 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
12428 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
12429 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
12430 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12431 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
12432 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
12433 ; AVX2-FP-NEXT: vmovdqa 1056(%rdi), %ymm3
12434 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12435 ; AVX2-FP-NEXT: vmovdqa 1024(%rdi), %ymm2
12436 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12437 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
12438 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
12439 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
12440 ; AVX2-FP-NEXT: vmovdqa 960(%rdi), %ymm2
12441 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12442 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
12443 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm2[2,3],ymm13[4,5],ymm2[6,7]
12444 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
12445 ; AVX2-FP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
12446 ; AVX2-FP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
12447 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12448 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
12449 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
12450 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
12451 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12452 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
12453 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
12454 ; AVX2-FP-NEXT: vmovdqa 1504(%rdi), %ymm3
12455 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12456 ; AVX2-FP-NEXT: vmovdqa 1472(%rdi), %ymm2
12457 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12458 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
12459 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
12460 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
12461 ; AVX2-FP-NEXT: vmovdqa 1408(%rdi), %ymm2
12462 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12463 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
12464 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm14[0],ymm15[1],ymm14[2,3,4],ymm15[5],ymm14[6,7]
12465 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12466 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
12467 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm2
12468 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
12469 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12470 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
12471 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
12472 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm15
12473 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm14
12474 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm2 = ymm15[12,13,14,15],ymm14[0,1,2,3,4,5,6,7,8,9,10,11],ymm15[28,29,30,31],ymm14[16,17,18,19,20,21,22,23,24,25,26,27]
12475 ; AVX2-FP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12476 ; AVX2-FP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12477 ; AVX2-FP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
12478 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
12479 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm4
12480 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
12481 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm4[2,3],ymm7[4,5],ymm4[6,7]
12482 ; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12483 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
12484 ; AVX2-FP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
12485 ; AVX2-FP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
12486 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12487 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
12488 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm0, %ymm0
12489 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12490 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12491 ; AVX2-FP-NEXT: vmovdqa 304(%rdi), %xmm0
12492 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm9[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
12493 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
12494 ; AVX2-FP-NEXT: vpbroadcastd 232(%rdi), %xmm1
12495 ; AVX2-FP-NEXT: vmovdqa 256(%rdi), %xmm5
12496 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3]
12497 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
12498 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
12499 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12500 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm12[0],ymm1[2],ymm12[2]
12501 ; AVX2-FP-NEXT: vpbroadcastd 428(%rdi), %ymm2
12502 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
12503 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12504 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12505 ; AVX2-FP-NEXT: vmovdqa 752(%rdi), %xmm0
12506 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12507 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm11[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
12508 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
12509 ; AVX2-FP-NEXT: vpbroadcastd 680(%rdi), %xmm1
12510 ; AVX2-FP-NEXT: vmovdqa 704(%rdi), %xmm2
12511 ; AVX2-FP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12512 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
12513 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
12514 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
12515 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12516 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
12517 ; AVX2-FP-NEXT: vpbroadcastd 876(%rdi), %ymm2
12518 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
12519 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12520 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12521 ; AVX2-FP-NEXT: vmovdqa 1200(%rdi), %xmm0
12522 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12523 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm10[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
12524 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
12525 ; AVX2-FP-NEXT: vpbroadcastd 1128(%rdi), %xmm1
12526 ; AVX2-FP-NEXT: vmovdqa 1152(%rdi), %xmm2
12527 ; AVX2-FP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12528 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
12529 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
12530 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12531 ; AVX2-FP-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
12532 ; AVX2-FP-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
12533 ; AVX2-FP-NEXT: vpbroadcastd 1324(%rdi), %ymm2
12534 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
12535 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12536 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12537 ; AVX2-FP-NEXT: vmovdqa 1648(%rdi), %xmm0
12538 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12539 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm6[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
12540 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
12541 ; AVX2-FP-NEXT: vpbroadcastd 1576(%rdi), %xmm1
12542 ; AVX2-FP-NEXT: vmovdqa 1600(%rdi), %xmm2
12543 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
12544 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
12545 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12546 ; AVX2-FP-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
12547 ; AVX2-FP-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
12548 ; AVX2-FP-NEXT: vpbroadcastd 1772(%rdi), %ymm6
12549 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm6[7]
12550 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12551 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12552 ; AVX2-FP-NEXT: vmovdqa 80(%rdi), %xmm0
12553 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm7[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],ymm7[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
12554 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
12555 ; AVX2-FP-NEXT: vpbroadcastd 8(%rdi), %xmm1
12556 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm4
12557 ; AVX2-FP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12558 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
12559 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
12560 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm14[0],ymm15[0],ymm14[2],ymm15[2]
12561 ; AVX2-FP-NEXT: vpbroadcastd 204(%rdi), %ymm6
12562 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm6[7]
12563 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12564 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12565 ; AVX2-FP-NEXT: vmovdqa 528(%rdi), %xmm0
12566 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12567 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm8[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
12568 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
12569 ; AVX2-FP-NEXT: vpbroadcastd 456(%rdi), %xmm1
12570 ; AVX2-FP-NEXT: vmovdqa 480(%rdi), %xmm4
12571 ; AVX2-FP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
12572 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
12573 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
12574 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12575 ; AVX2-FP-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
12576 ; AVX2-FP-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
12577 ; AVX2-FP-NEXT: vpbroadcastd 652(%rdi), %ymm15
12578 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm15[7]
12579 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12580 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12581 ; AVX2-FP-NEXT: vmovdqa 976(%rdi), %xmm0
12582 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12583 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm1 = ymm13[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
12584 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
12585 ; AVX2-FP-NEXT: vpbroadcastd 904(%rdi), %xmm15
12586 ; AVX2-FP-NEXT: vmovdqa 928(%rdi), %xmm11
12587 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm11[1],xmm15[2,3]
12588 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3,4,5,6,7]
12589 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
12590 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
12591 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm10[0],ymm7[0],ymm10[2],ymm7[2]
12592 ; AVX2-FP-NEXT: vpbroadcastd 1100(%rdi), %ymm14
12593 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5,6],ymm14[7]
12594 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5,6,7]
12595 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12596 ; AVX2-FP-NEXT: vmovdqa 1424(%rdi), %xmm0
12597 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
12598 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
12599 ; AVX2-FP-NEXT: vpalignr {{.*#+}} ymm14 = ymm9[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
12600 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6,7]
12601 ; AVX2-FP-NEXT: vpbroadcastd 1352(%rdi), %xmm15
12602 ; AVX2-FP-NEXT: vmovdqa 1376(%rdi), %xmm0
12603 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
12604 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
12605 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
12606 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12607 ; AVX2-FP-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm6[0],ymm1[0],ymm6[2],ymm1[2]
12608 ; AVX2-FP-NEXT: vpbroadcastd 1548(%rdi), %ymm13
12609 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5,6],ymm13[7]
12610 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
12611 ; AVX2-FP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12612 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
12613 ; AVX2-FP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm13 # 32-byte Folded Reload
12614 ; AVX2-FP-NEXT: # ymm13 = ymm4[0],mem[1],ymm4[2,3,4,5,6,7]
12615 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],mem[3]
12616 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,2,2,3]
12617 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,1,1,0,7,5,5,4]
12618 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm13[2,3,4,5,6,7]
12619 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
12620 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm13 = ymm4[0,2],ymm12[1,3],ymm4[4,6],ymm12[5,7]
12621 ; AVX2-FP-NEXT: vmovaps %ymm4, %ymm12
12622 ; AVX2-FP-NEXT: vbroadcastss 432(%rdi), %ymm14
12623 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm14[7]
12624 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm13[5,6,7]
12625 ; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12626 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
12627 ; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
12628 ; AVX2-FP-NEXT: # ymm5 = ymm4[0],mem[1],ymm4[2,3,4,5,6,7]
12629 ; AVX2-FP-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
12630 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1,2],mem[3]
12631 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,2,2,3]
12632 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm5 = ymm5[3,1,1,0,7,5,5,4]
12633 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3,4,5,6,7]
12634 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
12635 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm5 = ymm14[0,2],ymm3[1,3],ymm14[4,6],ymm3[5,7]
12636 ; AVX2-FP-NEXT: vbroadcastss 880(%rdi), %ymm13
12637 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm13[7]
12638 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm5[5,6,7]
12639 ; AVX2-FP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12640 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
12641 ; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
12642 ; AVX2-FP-NEXT: # ymm4 = ymm3[0],mem[1],ymm3[2,3,4,5,6,7]
12643 ; AVX2-FP-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
12644 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1,2],mem[3]
12645 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,2,2,3]
12646 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[3,1,1,0,7,5,5,4]
12647 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
12648 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
12649 ; AVX2-FP-NEXT: vshufps $216, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
12650 ; AVX2-FP-NEXT: # ymm4 = ymm4[0,2],mem[1,3],ymm4[4,6],mem[5,7]
12651 ; AVX2-FP-NEXT: vbroadcastss 1328(%rdi), %ymm5
12652 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
12653 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6,7]
12654 ; AVX2-FP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12655 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
12656 ; AVX2-FP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
12657 ; AVX2-FP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4,5,6,7]
12658 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],mem[3]
12659 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,2,2,3]
12660 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[3,1,1,0,7,5,5,4]
12661 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
12662 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
12663 ; AVX2-FP-NEXT: vshufps $216, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm3 # 32-byte Folded Reload
12664 ; AVX2-FP-NEXT: # ymm3 = ymm13[0,2],mem[1,3],ymm13[4,6],mem[5,7]
12665 ; AVX2-FP-NEXT: vbroadcastss 1776(%rdi), %ymm4
12666 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
12667 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
12668 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12669 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm9[0],ymm8[1],ymm9[2,3,4,5,6,7]
12670 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],mem[3]
12671 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
12672 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[3,1,1,0,7,5,5,4]
12673 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
12674 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,2],ymm1[1,3],ymm6[4,6],ymm1[5,7]
12675 ; AVX2-FP-NEXT: vmovaps %ymm1, %ymm9
12676 ; AVX2-FP-NEXT: vbroadcastss 1552(%rdi), %ymm3
12677 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
12678 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
12679 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12680 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
12681 ; AVX2-FP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
12682 ; AVX2-FP-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
12683 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm11[0,1,2],mem[3]
12684 ; AVX2-FP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
12685 ; AVX2-FP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
12686 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
12687 ; AVX2-FP-NEXT: vmovdqa %ymm10, %ymm8
12688 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,2],ymm7[1,3],ymm10[4,6],ymm7[5,7]
12689 ; AVX2-FP-NEXT: vmovaps %ymm7, %ymm11
12690 ; AVX2-FP-NEXT: vbroadcastss 1104(%rdi), %ymm2
12691 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
12692 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12693 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12694 ; AVX2-FP-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
12695 ; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
12696 ; AVX2-FP-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
12697 ; AVX2-FP-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
12698 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],mem[3]
12699 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,2,2,3]
12700 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
12701 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
12702 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
12703 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
12704 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,2],ymm7[1,3],ymm4[4,6],ymm7[5,7]
12705 ; AVX2-FP-NEXT: vbroadcastss 656(%rdi), %ymm2
12706 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
12707 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12708 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12709 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
12710 ; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
12711 ; AVX2-FP-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
12712 ; AVX2-FP-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
12713 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],mem[3]
12714 ; AVX2-FP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,2,2,3]
12715 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
12716 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
12717 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
12718 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
12719 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,2],ymm10[1,3],ymm3[4,6],ymm10[5,7]
12720 ; AVX2-FP-NEXT: vbroadcastss 208(%rdi), %ymm2
12721 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
12722 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
12723 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12724 ; AVX2-FP-NEXT: vbroadcastss 100(%rdi), %xmm0
12725 ; AVX2-FP-NEXT: vmovaps 64(%rdi), %xmm6
12726 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm6[0,1,2],xmm0[3]
12727 ; AVX2-FP-NEXT: vmovsd {{.*#+}} xmm5 = [4,3,0,0]
12728 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
12729 ; AVX2-FP-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
12730 ; AVX2-FP-NEXT: # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
12731 ; AVX2-FP-NEXT: vpermps %ymm1, %ymm5, %ymm1
12732 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
12733 ; AVX2-FP-NEXT: vbroadcastsd {{.*#+}} ymm15 = [0,7,0,7,0,7,0,7]
12734 ; AVX2-FP-NEXT: vpermps %ymm3, %ymm15, %ymm1
12735 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm10[6,7]
12736 ; AVX2-FP-NEXT: vbroadcastss 212(%rdi), %ymm2
12737 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
12738 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
12739 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12740 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
12741 ; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
12742 ; AVX2-FP-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
12743 ; AVX2-FP-NEXT: vpermps %ymm0, %ymm5, %ymm0
12744 ; AVX2-FP-NEXT: vbroadcastss 324(%rdi), %xmm2
12745 ; AVX2-FP-NEXT: vmovaps 288(%rdi), %xmm1
12746 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0,1,2],xmm2[3]
12747 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
12748 ; AVX2-FP-NEXT: vpermps %ymm12, %ymm15, %ymm2
12749 ; AVX2-FP-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
12750 ; AVX2-FP-NEXT: # ymm2 = ymm2[0,1,2,3,4,5],mem[6,7]
12751 ; AVX2-FP-NEXT: vbroadcastss 436(%rdi), %ymm3
12752 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
12753 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
12754 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
12755 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
12756 ; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
12757 ; AVX2-FP-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
12758 ; AVX2-FP-NEXT: vpermps %ymm0, %ymm5, %ymm0
12759 ; AVX2-FP-NEXT: vbroadcastss 548(%rdi), %xmm3
12760 ; AVX2-FP-NEXT: vmovaps 512(%rdi), %xmm2
12761 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm2[0,1,2],xmm3[3]
12762 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3]
12763 ; AVX2-FP-NEXT: vpermps %ymm4, %ymm15, %ymm3
12764 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm7[6,7]
12765 ; AVX2-FP-NEXT: vbroadcastss 660(%rdi), %ymm4
12766 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vpermps %ymm0, %ymm5, %ymm0
; AVX2-FP-NEXT: vbroadcastss 772(%rdi), %xmm4
; AVX2-FP-NEXT: vmovaps 736(%rdi), %xmm3
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm3[0,1,2],xmm4[3]
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
; AVX2-FP-NEXT: vpermps %ymm14, %ymm15, %ymm4
; AVX2-FP-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm4 = ymm4[0,1,2,3,4,5],mem[6,7]
; AVX2-FP-NEXT: vbroadcastss 884(%rdi), %ymm7
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm7[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vpermps %ymm0, %ymm5, %ymm0
; AVX2-FP-NEXT: vbroadcastss 996(%rdi), %xmm7
; AVX2-FP-NEXT: vmovaps 960(%rdi), %xmm4
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm7 = xmm4[0,1,2],xmm7[3]
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
; AVX2-FP-NEXT: vpermps %ymm8, %ymm15, %ymm7
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm11[6,7]
; AVX2-FP-NEXT: vbroadcastss 1108(%rdi), %ymm8
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vpermps %ymm0, %ymm5, %ymm0
; AVX2-FP-NEXT: vbroadcastss 1220(%rdi), %xmm7
; AVX2-FP-NEXT: vmovaps 1184(%rdi), %xmm14
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm7 = xmm14[0,1,2],xmm7[3]
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FP-NEXT: vpermps %ymm10, %ymm15, %ymm7
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm12[6,7]
; AVX2-FP-NEXT: vbroadcastss 1332(%rdi), %ymm8
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vpermps %ymm0, %ymm5, %ymm7
; AVX2-FP-NEXT: vbroadcastss 1444(%rdi), %xmm8
; AVX2-FP-NEXT: vmovaps 1408(%rdi), %xmm0
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm8 = xmm0[0,1,2],xmm8[3]
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3]
; AVX2-FP-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm8 # 32-byte Folded Reload
; AVX2-FP-NEXT: vmovaps %ymm9, %ymm11
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX2-FP-NEXT: vbroadcastss 1556(%rdi), %ymm9
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm9[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm7 = mem[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FP-NEXT: vpermps %ymm7, %ymm5, %ymm7
; AVX2-FP-NEXT: vbroadcastss 1668(%rdi), %xmm8
; AVX2-FP-NEXT: vmovaps 1632(%rdi), %xmm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm8 = xmm5[0,1,2],xmm8[3]
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3]
; AVX2-FP-NEXT: vpermps %ymm13, %ymm15, %ymm8
; AVX2-FP-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
; AVX2-FP-NEXT: vbroadcastss 1780(%rdi), %ymm9
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm9[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm7 = ymm7[0],mem[1],ymm7[2,3,4],mem[5],ymm7[6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,3,3,5,4,7,7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
; AVX2-FP-NEXT: vbroadcastss 216(%rdi), %ymm8
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
; AVX2-FP-NEXT: vmovaps 96(%rdi), %xmm9
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1,2],xmm6[3]
; AVX2-FP-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,3,2]
; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm8 = mem[1,0,2,3,5,4,6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm8, %xmm8
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 320(%rdi), %xmm13
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1,2],xmm1[3]
; AVX2-FP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,3,2]
; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm6 = mem[1,0,2,3,5,4,6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm6, %xmm6
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3]
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $221, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm6 = mem[0],ymm6[1],mem[2,3,4],ymm6[5],mem[6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,0,3,3,5,4,7,7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
; AVX2-FP-NEXT: vbroadcastss 440(%rdi), %ymm7
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 544(%rdi), %xmm8
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm2[3]
; AVX2-FP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,3,2]
; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm2 = mem[1,0,2,3,5,4,6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4],mem[5],ymm2[6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,0,3,3,5,4,7,7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
; AVX2-FP-NEXT: vbroadcastss 664(%rdi), %ymm6
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm6[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 768(%rdi), %xmm1
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0,1,2],xmm3[3]
; AVX2-FP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,3,2]
; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm3 = mem[1,0,2,3,5,4,6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $221, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm3 = mem[0],ymm3[1],mem[2,3,4],ymm3[5],mem[6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm3 = ymm3[1,0,3,3,5,4,7,7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
; AVX2-FP-NEXT: vbroadcastss 888(%rdi), %ymm6
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm6[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 992(%rdi), %xmm2
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm2[0,1,2],xmm4[3]
; AVX2-FP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,3,2]
; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm4 = mem[1,0,2,3,5,4,6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm4, %xmm4
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[1,0,3,3,5,4,7,7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
; AVX2-FP-NEXT: vbroadcastss 1112(%rdi), %ymm6
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm6[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 1216(%rdi), %xmm3
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm3[0,1,2],xmm14[3]
; AVX2-FP-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,3,2]
; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm6 = mem[1,0,2,3,5,4,6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm6, %xmm6
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm12[0],ymm10[1],ymm12[2,3,4],ymm10[5],ymm12[6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,0,3,3,5,4,7,7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
; AVX2-FP-NEXT: vbroadcastss 1336(%rdi), %ymm10
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm10[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm4[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 1440(%rdi), %xmm4
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[3]
; AVX2-FP-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,3,2]
; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm10 = mem[1,0,2,3,5,4,6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm10, %xmm10
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm10[0,1],xmm0[2,3]
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm10 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,0,3,3,5,4,7,7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
; AVX2-FP-NEXT: vbroadcastss 1560(%rdi), %ymm12
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,6],ymm12[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 1664(%rdi), %xmm14
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm14[0,1,2],xmm5[3]
; AVX2-FP-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,3,2]
; AVX2-FP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm5[0,1],xmm0[2,3]
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm5 = ymm5[1,0,3,3,5,4,7,7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
; AVX2-FP-NEXT: vbroadcastss 1784(%rdi), %ymm12
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm12[7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT: vbroadcastss 136(%rdi), %xmm0
; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
; AVX2-FP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FP-NEXT: vpermps 192(%rdi), %ymm15, %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vbroadcastss 80(%rdi), %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm9[3]
; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm11 = mem[2,3,2,3,6,7,6,7]
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm11 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm11, %xmm11
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm11[0,1],xmm5[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm11 = ymm5[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vbroadcastss 360(%rdi), %xmm0
; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
; AVX2-FP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FP-NEXT: vpermps 416(%rdi), %ymm15, %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vbroadcastss 304(%rdi), %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm13[3]
; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm13 = mem[2,3,2,3,6,7,6,7]
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm13 = ymm13[0],mem[1],ymm13[2,3,4],mem[5],ymm13[6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm13, %xmm13
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm13[0,1],xmm5[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm13 = ymm5[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vbroadcastss 584(%rdi), %xmm0
; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
; AVX2-FP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FP-NEXT: vpermps 640(%rdi), %ymm15, %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vbroadcastss 528(%rdi), %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3]
; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm8 = mem[2,3,2,3,6,7,6,7]
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm8 = ymm8[0],mem[1],ymm8[2,3,4],mem[5],ymm8[6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm8, %xmm8
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm8[0,1],xmm5[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vbroadcastss 808(%rdi), %xmm0
; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
; AVX2-FP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FP-NEXT: vpermps 864(%rdi), %ymm15, %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vbroadcastss 752(%rdi), %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[3]
; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = mem[2,3,2,3,6,7,6,7]
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vbroadcastss 1032(%rdi), %xmm1
; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm1 = xmm1[0],mem[1],xmm1[2,3]
; AVX2-FP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FP-NEXT: vpermps 1088(%rdi), %ymm15, %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vbroadcastss 976(%rdi), %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3]
; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = mem[2,3,2,3,6,7,6,7]
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vbroadcastss 1256(%rdi), %xmm2
; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm2 = xmm2[0],mem[1],xmm2[2,3]
; AVX2-FP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FP-NEXT: vpermps 1312(%rdi), %ymm15, %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vbroadcastss 1200(%rdi), %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[3]
; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = mem[2,3,2,3,6,7,6,7]
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT: vbroadcastss 1480(%rdi), %xmm3
; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
; AVX2-FP-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX2-FP-NEXT: vpermps 1536(%rdi), %ymm15, %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vbroadcastss 1424(%rdi), %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3]
; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = mem[2,3,2,3,6,7,6,7]
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vbroadcastss 1704(%rdi), %xmm4
; AVX2-FP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
; AVX2-FP-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
; AVX2-FP-NEXT: vpermps 1760(%rdi), %ymm15, %ymm5
; AVX2-FP-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vbroadcastss 1648(%rdi), %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm14[3]
; AVX2-FP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm14 = mem[2,3,2,3,6,7,6,7]
; AVX2-FP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FP-NEXT: # ymm14 = ymm14[0],mem[1],ymm14[2,3,4],mem[5],ymm14[6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm14, %xmm14
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm14[0,1],xmm5[2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 192(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 128(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 64(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, (%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 224(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 160(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 96(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 32(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 192(%rdx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 128(%rdx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 64(%rdx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, (%rdx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 224(%rdx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 160(%rdx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 96(%rdx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 32(%rdx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 192(%rcx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 128(%rcx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 64(%rcx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, (%rcx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 224(%rcx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 160(%rcx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 96(%rcx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 32(%rcx)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, (%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 64(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 128(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 192(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 224(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 160(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 96(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 32(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 224(%r9)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 192(%r9)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 160(%r9)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 128(%r9)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 96(%r9)
; AVX2-FP-NEXT: vmovups (%rsp), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 64(%r9)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 32(%r9)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, (%r9)
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FP-NEXT: vmovaps %ymm12, 224(%rax)
; AVX2-FP-NEXT: vmovaps %ymm10, 192(%rax)
; AVX2-FP-NEXT: vmovaps %ymm6, 160(%rax)
; AVX2-FP-NEXT: vmovaps %ymm7, 128(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 96(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 64(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, 32(%rax)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm5, (%rax)
; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FP-NEXT: vmovaps %ymm4, 224(%rax)
; AVX2-FP-NEXT: vmovaps %ymm3, 192(%rax)
; AVX2-FP-NEXT: vmovaps %ymm2, 160(%rax)
; AVX2-FP-NEXT: vmovaps %ymm1, 128(%rax)
; AVX2-FP-NEXT: vmovaps %ymm0, 96(%rax)
; AVX2-FP-NEXT: vmovaps %ymm8, 64(%rax)
; AVX2-FP-NEXT: vmovaps %ymm13, 32(%rax)
; AVX2-FP-NEXT: vmovaps %ymm11, (%rax)
; AVX2-FP-NEXT: addq $2648, %rsp # imm = 0xA58
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
; AVX2-FCP-LABEL: load_i32_stride7_vf64:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: subq $2648, %rsp # imm = 0xA58
; AVX2-FCP-NEXT: vmovdqa 1216(%rdi), %ymm9
; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1152(%rdi), %ymm4
; AVX2-FCP-NEXT: vmovdqa 1120(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 768(%rdi), %ymm13
; AVX2-FCP-NEXT: vmovdqa 704(%rdi), %ymm6
; AVX2-FCP-NEXT: vmovdqa 672(%rdi), %ymm7
; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm8
; AVX2-FCP-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %ymm10
; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm11
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,7,6,0]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5],ymm10[6],ymm11[7]
; AVX2-FCP-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpbroadcastq 304(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %xmm3
; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpbroadcastd 420(%rdi), %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5],ymm6[6],ymm7[7]
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa %ymm6, %ymm8
; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpbroadcastq 752(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm13[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa %ymm13, %ymm6
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 800(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa 832(%rdi), %xmm3
; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpbroadcastd 868(%rdi), %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm4[6],ymm5[7]
; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpbroadcastq 1200(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 1248(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa 1280(%rdi), %xmm3
; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpbroadcastd 1316(%rdi), %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1600(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1568(%rdi), %ymm5
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm1[6],ymm5[7]
; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 1664(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpbroadcastq 1648(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 1696(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa 1728(%rdi), %xmm3
; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpbroadcastd 1764(%rdi), %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpbroadcastq 80(%rdi), %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6],ymm2[7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm3
; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpbroadcastd 196(%rdi), %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 480(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 448(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 544(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpbroadcastq 528(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 576(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa 608(%rdi), %xmm3
; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpbroadcastd 644(%rdi), %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 928(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 896(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 992(%rdi), %ymm12
; AVX2-FCP-NEXT: vpbroadcastq 976(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm12[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm12, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 1024(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa 1056(%rdi), %xmm3
; AVX2-FCP-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpbroadcastd 1092(%rdi), %ymm3
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1376(%rdi), %ymm15
; AVX2-FCP-NEXT: vmovdqa 1344(%rdi), %ymm14
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm14[0,1,2,3,4,5],ymm15[6],ymm14[7]
; AVX2-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermd %ymm1, %ymm0, %ymm0
; AVX2-FCP-NEXT: vmovdqa 1440(%rdi), %ymm9
; AVX2-FCP-NEXT: vpbroadcastq 1424(%rdi), %ymm1
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqa 1472(%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa 1504(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vpbroadcastd 1540(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,2,2,2]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
; AVX2-FCP-NEXT: vmovdqa 384(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11],ymm1[28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,0]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3],ymm13[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0],ymm11[1],ymm10[2,3,4],ymm11[5],ymm10[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
; AVX2-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,0,7,6,5,6,5,6]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 832(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 800(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10,11],ymm2[28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX2-FCP-NEXT: vmovdqa 736(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm8[0],ymm7[1],ymm8[2,3,4],ymm7[5],ymm8[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 1280(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1248(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX2-FCP-NEXT: vmovdqa 1184(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3],ymm8[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm7 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm7 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4],ymm2[5,6],ymm7[7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 1728(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1696(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX2-FCP-NEXT: vmovdqa 1632(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3],ymm7[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vpblendd $221, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm5 = mem[0],ymm5[1],mem[2,3,4],ymm5[5],mem[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5,6],ymm5[7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 608(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 576(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX2-FCP-NEXT: vmovdqa 512(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm2[2,3],ymm11[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 1056(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1024(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX2-FCP-NEXT: vmovdqa 960(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1],ymm2[2,3],ymm12[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 1504(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1472(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX2-FCP-NEXT: vmovdqa 1408(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm9[0,1],ymm2[2,3],ymm9[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm15[0],ymm14[1],ymm15[2,3,4],ymm14[5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,2,2,2]
; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm15
; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm14
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm15[12,13,14,15],ymm14[0,1,2,3,4,5,6,7,8,9,10,11],ymm15[28,29,30,31],ymm14[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-FCP-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,0]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm4
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1],ymm4[2,3],ymm12[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
; AVX2-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm0
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 304(%rdi), %xmm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm13[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vpbroadcastd 232(%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %xmm5
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
; AVX2-FCP-NEXT: vpbroadcastd 428(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 752(%rdi), %xmm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm6[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vpbroadcastd 680(%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa 704(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm1 = ymm3[0],mem[0],ymm3[2],mem[2]
; AVX2-FCP-NEXT: vpbroadcastd 876(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1200(%rdi), %xmm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm8[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vpbroadcastd 1128(%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa 1152(%rdi), %xmm2
; AVX2-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm8[0],ymm1[2],ymm8[2]
; AVX2-FCP-NEXT: vpbroadcastd 1324(%rdi), %ymm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1648(%rdi), %xmm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm7[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm7[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vpbroadcastd 1576(%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa 1600(%rdi), %xmm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX2-FCP-NEXT: vpbroadcastd 1772(%rdi), %ymm6
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm6[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm12[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vpbroadcastd 8(%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm4
; AVX2-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm14[0],ymm15[0],ymm14[2],ymm15[2]
; AVX2-FCP-NEXT: vpbroadcastd 204(%rdi), %ymm6
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm6[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 528(%rdi), %xmm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm1 = ymm11[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vpbroadcastd 456(%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa 480(%rdi), %xmm4
; AVX2-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX2-FCP-NEXT: vpbroadcastd 652(%rdi), %ymm15
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm15[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 976(%rdi), %xmm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpalignr $8, (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vpbroadcastd 904(%rdi), %xmm15
; AVX2-FCP-NEXT: vmovdqa 928(%rdi), %xmm12
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm12[1],xmm15[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm15 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX2-FCP-NEXT: vpbroadcastd 1100(%rdi), %ymm14
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5,6],ymm14[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqa 1424(%rdi), %xmm0
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-FCP-NEXT: vpalignr {{.*#+}} ymm14 = ymm7[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm7[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vpbroadcastd 1352(%rdi), %xmm15
; AVX2-FCP-NEXT: vmovdqa 1376(%rdi), %xmm0
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FCP-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm11[0],ymm1[0],ymm11[2],ymm1[2]
; AVX2-FCP-NEXT: vpbroadcastd 1548(%rdi), %ymm13
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5,6],ymm13[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm13 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm13 = ymm4[0],mem[1],ymm4[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],mem[3]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,2,2,3]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,1,1,0,7,5,5,4]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm13[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm13 = ymm10[0,2],ymm9[1,3],ymm10[4,6],ymm9[5,7]
; AVX2-FCP-NEXT: vbroadcastss 432(%rdi), %ymm14
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm14[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm13[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm5 = ymm4[0],mem[1],ymm4[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1,2],mem[3]
; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,2,2,3]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm5 = ymm5[3,1,1,0,7,5,5,4]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm5 = ymm3[0,2],ymm10[1,3],ymm3[4,6],ymm10[5,7]
; AVX2-FCP-NEXT: vbroadcastss 880(%rdi), %ymm13
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm13[7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm5[5,6,7]
; AVX2-FCP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm4 = ymm3[0],mem[1],ymm3[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1,2],mem[3]
; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,2,2,3]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[3,1,1,0,7,5,5,4]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm4 = ymm14[0,2],ymm8[1,3],ymm14[4,6],ymm8[5,7]
; AVX2-FCP-NEXT: vmovaps %ymm8, %ymm13
; AVX2-FCP-NEXT: vbroadcastss 1328(%rdi), %ymm5
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6,7]
; AVX2-FCP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],mem[3]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,2,2,3]
; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[3,1,1,0,7,5,5,4]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FCP-NEXT: vshufps $216, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm3 = ymm3[0,2],mem[1,3],ymm3[4,6],mem[5,7]
; AVX2-FCP-NEXT: vbroadcastss 1776(%rdi), %ymm4
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13693 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0],ymm6[1],ymm7[2,3,4,5,6,7]
13694 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],mem[3]
13695 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
13696 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[3,1,1,0,7,5,5,4]
13697 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
13698 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm2 = ymm11[0,2],ymm1[1,3],ymm11[4,6],ymm1[5,7]
13699 ; AVX2-FCP-NEXT: vbroadcastss 1552(%rdi), %ymm3
13700 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
13701 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
13702 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13703 ; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
13704 ; AVX2-FCP-NEXT: vpblendd $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
13705 ; AVX2-FCP-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
13706 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm12[0,1,2],mem[3]
13707 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,2,3]
13708 ; AVX2-FCP-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
13709 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
13710 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
13711 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
13712 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm11[0,2],ymm8[1,3],ymm11[4,6],ymm8[5,7]
13713 ; AVX2-FCP-NEXT: vbroadcastss 1104(%rdi), %ymm2
13714 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
13715 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
13716 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13717 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
13718 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
13719 ; AVX2-FCP-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
13720 ; AVX2-FCP-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
13721 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],mem[3]
13722 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,2,2,3]
13723 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
13724 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
13725 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
13726 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
13727 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,2],ymm7[1,3],ymm6[4,6],ymm7[5,7]
13728 ; AVX2-FCP-NEXT: vbroadcastss 656(%rdi), %ymm2
13729 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
13730 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
13731 ; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13732 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
13733 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
13734 ; AVX2-FCP-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5,6,7]
13735 ; AVX2-FCP-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
13736 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],mem[3]
13737 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,2,2,3]
13738 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,1,1,0,7,5,5,4]
13739 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
13740 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
13741 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
13742 ; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,2],ymm12[1,3],ymm4[4,6],ymm12[5,7]
13743 ; AVX2-FCP-NEXT: vbroadcastss 208(%rdi), %ymm2
13744 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
13745 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
13746 ; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13747 ; AVX2-FCP-NEXT: vbroadcastss 100(%rdi), %xmm0
13748 ; AVX2-FCP-NEXT: vmovaps 64(%rdi), %xmm3
13749 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm0[3]
13750 ; AVX2-FCP-NEXT: vmovsd {{.*#+}} xmm0 = [4,3,0,0]
13751 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
13752 ; AVX2-FCP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
13753 ; AVX2-FCP-NEXT: # ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
13754 ; AVX2-FCP-NEXT: vpermps %ymm2, %ymm0, %ymm2
13755 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
13756 ; AVX2-FCP-NEXT: vbroadcastsd {{.*#+}} ymm15 = [0,7,0,7,0,7,0,7]
13757 ; AVX2-FCP-NEXT: vpermps %ymm4, %ymm15, %ymm2
13758 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm12[6,7]
13759 ; AVX2-FCP-NEXT: vbroadcastss 212(%rdi), %ymm4
13760 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
13761 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
13762 ; AVX2-FCP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13763 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
13764 ; AVX2-FCP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
13765 ; AVX2-FCP-NEXT: # ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
13766 ; AVX2-FCP-NEXT: vpermps %ymm1, %ymm0, %ymm2
13767 ; AVX2-FCP-NEXT: vbroadcastss 324(%rdi), %xmm4
13768 ; AVX2-FCP-NEXT: vmovaps 288(%rdi), %xmm1
13769 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm4 = xmm1[0,1,2],xmm4[3]
13770 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3]
13771 ; AVX2-FCP-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm4 # 32-byte Folded Reload
13772 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm9[6,7]
13773 ; AVX2-FCP-NEXT: vbroadcastss 436(%rdi), %ymm5
13774 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
13775 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
13776 ; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13777 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
13778 ; AVX2-FCP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
13779 ; AVX2-FCP-NEXT: # ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
13780 ; AVX2-FCP-NEXT: vpermps %ymm2, %ymm0, %ymm4
13781 ; AVX2-FCP-NEXT: vbroadcastss 548(%rdi), %xmm5
13782 ; AVX2-FCP-NEXT: vmovaps 512(%rdi), %xmm2
13783 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm5 = xmm2[0,1,2],xmm5[3]
13784 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3]
13785 ; AVX2-FCP-NEXT: vpermps %ymm6, %ymm15, %ymm5
13786 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm7[6,7]
13787 ; AVX2-FCP-NEXT: vbroadcastss 660(%rdi), %ymm6
13788 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm6[7]
13789 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
13790 ; AVX2-FCP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13791 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
13792 ; AVX2-FCP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
13793 ; AVX2-FCP-NEXT: # ymm4 = mem[0,1,2,3],ymm4[4,5,6,7]
13794 ; AVX2-FCP-NEXT: vpermps %ymm4, %ymm0, %ymm5
13795 ; AVX2-FCP-NEXT: vbroadcastss 772(%rdi), %xmm6
13796 ; AVX2-FCP-NEXT: vmovaps 736(%rdi), %xmm4
13797 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm6 = xmm4[0,1,2],xmm6[3]
13798 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3]
13799 ; AVX2-FCP-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm6 # 32-byte Folded Reload
13800 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm10[6,7]
13801 ; AVX2-FCP-NEXT: vbroadcastss 884(%rdi), %ymm7
13802 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
13803 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
13804 ; AVX2-FCP-NEXT: vmovups %ymm5, (%rsp) # 32-byte Spill
13805 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
13806 ; AVX2-FCP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
13807 ; AVX2-FCP-NEXT: # ymm5 = mem[0,1,2,3],ymm5[4,5,6,7]
13808 ; AVX2-FCP-NEXT: vpermps %ymm5, %ymm0, %ymm6
13809 ; AVX2-FCP-NEXT: vbroadcastss 996(%rdi), %xmm7
13810 ; AVX2-FCP-NEXT: vmovaps 960(%rdi), %xmm5
13811 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm7 = xmm5[0,1,2],xmm7[3]
13812 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
13813 ; AVX2-FCP-NEXT: vpermps %ymm11, %ymm15, %ymm7
13814 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
13815 ; AVX2-FCP-NEXT: vbroadcastss 1108(%rdi), %ymm8
13816 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
13817 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
13818 ; AVX2-FCP-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13819 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
13820 ; AVX2-FCP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
13821 ; AVX2-FCP-NEXT: # ymm6 = mem[0,1,2,3],ymm6[4,5,6,7]
13822 ; AVX2-FCP-NEXT: vpermps %ymm6, %ymm0, %ymm6
13823 ; AVX2-FCP-NEXT: vbroadcastss 1220(%rdi), %xmm7
13824 ; AVX2-FCP-NEXT: vmovaps 1184(%rdi), %xmm10
13825 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm7 = xmm10[0,1,2],xmm7[3]
13826 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
13827 ; AVX2-FCP-NEXT: vpermps %ymm14, %ymm15, %ymm7
13828 ; AVX2-FCP-NEXT: vmovaps %ymm13, %ymm11
13829 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm13[6,7]
13830 ; AVX2-FCP-NEXT: vbroadcastss 1332(%rdi), %ymm8
13831 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
13832 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
13833 ; AVX2-FCP-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13834 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
13835 ; AVX2-FCP-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
13836 ; AVX2-FCP-NEXT: # ymm6 = mem[0,1,2,3],ymm6[4,5,6,7]
13837 ; AVX2-FCP-NEXT: vpermps %ymm6, %ymm0, %ymm6
13838 ; AVX2-FCP-NEXT: vbroadcastss 1444(%rdi), %xmm7
13839 ; AVX2-FCP-NEXT: vmovaps 1408(%rdi), %xmm14
13840 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm7 = xmm14[0,1,2],xmm7[3]
13841 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
13842 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
13843 ; AVX2-FCP-NEXT: vpermps %ymm13, %ymm15, %ymm7
13844 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
13845 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm9[6,7]
13846 ; AVX2-FCP-NEXT: vbroadcastss 1556(%rdi), %ymm8
13847 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
13848 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
13849 ; AVX2-FCP-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13850 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
13851 ; AVX2-FCP-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
13852 ; AVX2-FCP-NEXT: # ymm6 = ymm6[0,1,2,3],mem[4,5,6,7]
13853 ; AVX2-FCP-NEXT: vpermps %ymm6, %ymm0, %ymm6
13854 ; AVX2-FCP-NEXT: vbroadcastss 1668(%rdi), %xmm7
13855 ; AVX2-FCP-NEXT: vmovaps 1632(%rdi), %xmm0
13856 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm7 = xmm0[0,1,2],xmm7[3]
13857 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
13858 ; AVX2-FCP-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm7 # 32-byte Folded Reload
13859 ; AVX2-FCP-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
13860 ; AVX2-FCP-NEXT: # ymm7 = ymm7[0,1,2,3,4,5],mem[6,7]
13861 ; AVX2-FCP-NEXT: vbroadcastss 1780(%rdi), %ymm8
13862 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
13863 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
13864 ; AVX2-FCP-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13865 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm6 # 32-byte Folded Reload
13866 ; AVX2-FCP-NEXT: # ymm6 = ymm12[0],mem[1],ymm12[2,3,4],mem[5],ymm12[6,7]
13867 ; AVX2-FCP-NEXT: vmovaps {{.*#+}} ymm12 = [1,0,3,3,1,0,7,7]
13868 ; AVX2-FCP-NEXT: vpermps %ymm6, %ymm12, %ymm6
13869 ; AVX2-FCP-NEXT: vbroadcastss 216(%rdi), %ymm7
13870 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
13871 ; AVX2-FCP-NEXT: vmovaps 96(%rdi), %xmm7
13872 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm3 = xmm7[0,1,2],xmm3[3]
13873 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,3,2]
13874 ; AVX2-FCP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
13875 ; AVX2-FCP-NEXT: # ymm8 = mem[1,0,2,3,5,4,6,7]
13876 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm8, %xmm8
13877 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm3 = xmm8[0,1],xmm3[2,3]
13878 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
13879 ; AVX2-FCP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13880 ; AVX2-FCP-NEXT: vmovaps 320(%rdi), %xmm8
13881 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3]
13882 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,3,2]
13883 ; AVX2-FCP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
13884 ; AVX2-FCP-NEXT: # ymm3 = mem[1,0,2,3,5,4,6,7]
13885 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm3, %xmm3
13886 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
13887 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
13888 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
13889 ; AVX2-FCP-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
13890 ; AVX2-FCP-NEXT: vpermps %ymm3, %ymm12, %ymm3
13891 ; AVX2-FCP-NEXT: vbroadcastss 440(%rdi), %ymm6
13892 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm6[7]
13893 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
13894 ; AVX2-FCP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13895 ; AVX2-FCP-NEXT: vmovaps 544(%rdi), %xmm6
13896 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1,2],xmm2[3]
13897 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,3,2]
13898 ; AVX2-FCP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
13899 ; AVX2-FCP-NEXT: # ymm2 = mem[1,0,2,3,5,4,6,7]
13900 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm2, %xmm2
13901 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
13902 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
13903 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
13904 ; AVX2-FCP-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4],mem[5],ymm2[6,7]
13905 ; AVX2-FCP-NEXT: vpermps %ymm2, %ymm12, %ymm2
13906 ; AVX2-FCP-NEXT: vbroadcastss 664(%rdi), %ymm3
13907 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
13908 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
13909 ; AVX2-FCP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13910 ; AVX2-FCP-NEXT: vmovaps 768(%rdi), %xmm3
13911 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm4[3]
13912 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,3,2]
13913 ; AVX2-FCP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
13914 ; AVX2-FCP-NEXT: # ymm2 = mem[1,0,2,3,5,4,6,7]
13915 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm2, %xmm2
13916 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
13917 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
13918 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
13919 ; AVX2-FCP-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4],mem[5],ymm2[6,7]
13920 ; AVX2-FCP-NEXT: vpermps %ymm2, %ymm12, %ymm2
13921 ; AVX2-FCP-NEXT: vbroadcastss 888(%rdi), %ymm4
13922 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
13923 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
13924 ; AVX2-FCP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13925 ; AVX2-FCP-NEXT: vmovaps 992(%rdi), %xmm1
13926 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0,1,2],xmm5[3]
13927 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,3,2]
13928 ; AVX2-FCP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
13929 ; AVX2-FCP-NEXT: # ymm4 = mem[1,0,2,3,5,4,6,7]
13930 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm4, %xmm4
13931 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
13932 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
13933 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
13934 ; AVX2-FCP-NEXT: # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
13935 ; AVX2-FCP-NEXT: vpermps %ymm4, %ymm12, %ymm4
13936 ; AVX2-FCP-NEXT: vbroadcastss 1112(%rdi), %ymm5
13937 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
13938 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
13939 ; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
13940 ; AVX2-FCP-NEXT: vmovaps 1216(%rdi), %xmm2
13941 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm4 = xmm2[0,1,2],xmm10[3]
13942 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,3,2]
13943 ; AVX2-FCP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
13944 ; AVX2-FCP-NEXT: # ymm5 = mem[1,0,2,3,5,4,6,7]
13945 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm5, %xmm5
13946 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
13947 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm5 # 32-byte Folded Reload
13948 ; AVX2-FCP-NEXT: # ymm5 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
13949 ; AVX2-FCP-NEXT: vpermps %ymm5, %ymm12, %ymm5
13950 ; AVX2-FCP-NEXT: vbroadcastss 1336(%rdi), %ymm10
13951 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm10[7]
13952 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm4[0,1,2,3],ymm5[4,5,6,7]
13953 ; AVX2-FCP-NEXT: vmovaps 1440(%rdi), %xmm4
13954 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm10 = xmm4[0,1,2],xmm14[3]
13955 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,1,3,2]
13956 ; AVX2-FCP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
13957 ; AVX2-FCP-NEXT: # ymm14 = mem[1,0,2,3,5,4,6,7]
13958 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm14, %xmm14
13959 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
13960 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm9[0],ymm13[1],ymm9[2,3,4],ymm13[5],ymm9[6,7]
13961 ; AVX2-FCP-NEXT: vpermps %ymm14, %ymm12, %ymm14
13962 ; AVX2-FCP-NEXT: vbroadcastss 1560(%rdi), %ymm11
13963 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm11 = ymm14[0,1,2,3,4,5,6],ymm11[7]
13964 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
13965 ; AVX2-FCP-NEXT: vmovaps 1664(%rdi), %xmm14
13966 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm14[0,1,2],xmm0[3]
13967 ; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,3,2]
13968 ; AVX2-FCP-NEXT: vpermilps $225, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
13969 ; AVX2-FCP-NEXT: # ymm11 = mem[1,0,2,3,5,4,6,7]
13970 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm11, %xmm11
13971 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm11[0,1],xmm0[2,3]
13972 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
13973 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm11 # 32-byte Folded Reload
13974 ; AVX2-FCP-NEXT: # ymm11 = ymm9[0],mem[1],ymm9[2,3,4],mem[5],ymm9[6,7]
13975 ; AVX2-FCP-NEXT: vpermps %ymm11, %ymm12, %ymm11
13976 ; AVX2-FCP-NEXT: vbroadcastss 1784(%rdi), %ymm12
13977 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm12[7]
13978 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm11[4,5,6,7]
13979 ; AVX2-FCP-NEXT: vbroadcastss 136(%rdi), %xmm0
13980 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
13981 ; AVX2-FCP-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
13982 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
13983 ; AVX2-FCP-NEXT: vpermps 192(%rdi), %ymm15, %ymm11
13984 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm11[6,7]
13985 ; AVX2-FCP-NEXT: vbroadcastss 80(%rdi), %ymm11
13986 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm7 = xmm11[0,1,2],xmm7[3]
13987 ; AVX2-FCP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
13988 ; AVX2-FCP-NEXT: # ymm11 = mem[2,3,2,3,6,7,6,7]
13989 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
13990 ; AVX2-FCP-NEXT: # ymm11 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
13991 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm11, %xmm11
13992 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm7 = xmm11[0,1],xmm7[2,3]
13993 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm0[4,5,6,7]
13994 ; AVX2-FCP-NEXT: vbroadcastss 360(%rdi), %xmm0
13995 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
13996 ; AVX2-FCP-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
13997 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
13998 ; AVX2-FCP-NEXT: vpermps 416(%rdi), %ymm15, %ymm11
13999 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm11[6,7]
14000 ; AVX2-FCP-NEXT: vbroadcastss 304(%rdi), %ymm11
14001 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1,2],xmm8[3]
14002 ; AVX2-FCP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
14003 ; AVX2-FCP-NEXT: # ymm13 = mem[2,3,2,3,6,7,6,7]
14004 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
14005 ; AVX2-FCP-NEXT: # ymm13 = ymm13[0],mem[1],ymm13[2,3,4],mem[5],ymm13[6,7]
14006 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm13, %xmm13
14007 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm11 = xmm13[0,1],xmm11[2,3]
14008 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm13 = ymm11[0,1,2,3],ymm0[4,5,6,7]
14009 ; AVX2-FCP-NEXT: vbroadcastss 584(%rdi), %xmm0
14010 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
14011 ; AVX2-FCP-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
14012 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
14013 ; AVX2-FCP-NEXT: vpermps 640(%rdi), %ymm15, %ymm11
14014 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm11[6,7]
14015 ; AVX2-FCP-NEXT: vbroadcastss 528(%rdi), %ymm11
14016 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm8 = xmm11[0,1,2],xmm6[3]
14017 ; AVX2-FCP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
14018 ; AVX2-FCP-NEXT: # ymm11 = mem[2,3,2,3,6,7,6,7]
14019 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
14020 ; AVX2-FCP-NEXT: # ymm11 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
14021 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm11, %xmm11
14022 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm8 = xmm11[0,1],xmm8[2,3]
14023 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm0[4,5,6,7]
14024 ; AVX2-FCP-NEXT: vbroadcastss 808(%rdi), %xmm0
14025 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
14026 ; AVX2-FCP-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
14027 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
14028 ; AVX2-FCP-NEXT: vpermps 864(%rdi), %ymm15, %ymm11
14029 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm11[6,7]
14030 ; AVX2-FCP-NEXT: vbroadcastss 752(%rdi), %ymm11
14031 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm3 = xmm11[0,1,2],xmm3[3]
14032 ; AVX2-FCP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
14033 ; AVX2-FCP-NEXT: # ymm11 = mem[2,3,2,3,6,7,6,7]
14034 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
14035 ; AVX2-FCP-NEXT: # ymm11 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
14036 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm11, %xmm11
14037 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm3 = xmm11[0,1],xmm3[2,3]
14038 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5,6,7]
14039 ; AVX2-FCP-NEXT: vbroadcastss 1032(%rdi), %xmm0
14040 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
14041 ; AVX2-FCP-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
14042 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
14043 ; AVX2-FCP-NEXT: vpermps 1088(%rdi), %ymm15, %ymm11
14044 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm11[6,7]
14045 ; AVX2-FCP-NEXT: vbroadcastss 976(%rdi), %ymm11
14046 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm11[0,1,2],xmm1[3]
14047 ; AVX2-FCP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
14048 ; AVX2-FCP-NEXT: # ymm11 = mem[2,3,2,3,6,7,6,7]
14049 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
14050 ; AVX2-FCP-NEXT: # ymm11 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
14051 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm11, %xmm11
14052 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm11[0,1],xmm1[2,3]
14053 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
14054 ; AVX2-FCP-NEXT: vbroadcastss 1256(%rdi), %xmm1
14055 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
14056 ; AVX2-FCP-NEXT: # xmm1 = xmm1[0],mem[1],xmm1[2,3]
14057 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
14058 ; AVX2-FCP-NEXT: vpermps 1312(%rdi), %ymm15, %ymm11
14059 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm11[6,7]
14060 ; AVX2-FCP-NEXT: vbroadcastss 1200(%rdi), %ymm11
14061 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm11[0,1,2],xmm2[3]
14062 ; AVX2-FCP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
14063 ; AVX2-FCP-NEXT: # ymm11 = mem[2,3,2,3,6,7,6,7]
14064 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
14065 ; AVX2-FCP-NEXT: # ymm11 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
14066 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm11, %xmm11
14067 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm11[0,1],xmm2[2,3]
14068 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
14069 ; AVX2-FCP-NEXT: vbroadcastss 1480(%rdi), %xmm2
14070 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
14071 ; AVX2-FCP-NEXT: # xmm2 = xmm2[0],mem[1],xmm2[2,3]
14072 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
14073 ; AVX2-FCP-NEXT: vpermps 1536(%rdi), %ymm15, %ymm11
14074 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm11[6,7]
14075 ; AVX2-FCP-NEXT: vbroadcastss 1424(%rdi), %ymm11
14076 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm4 = xmm11[0,1,2],xmm4[3]
14077 ; AVX2-FCP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
14078 ; AVX2-FCP-NEXT: # ymm11 = mem[2,3,2,3,6,7,6,7]
14079 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
14080 ; AVX2-FCP-NEXT: # ymm11 = ymm11[0],mem[1],ymm11[2,3,4],mem[5],ymm11[6,7]
14081 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm11, %xmm11
14082 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm4 = xmm11[0,1],xmm4[2,3]
14083 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
14084 ; AVX2-FCP-NEXT: vbroadcastss 1704(%rdi), %xmm4
14085 ; AVX2-FCP-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
14086 ; AVX2-FCP-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
14087 ; AVX2-FCP-NEXT: vpermps 1760(%rdi), %ymm15, %ymm11
14088 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
14089 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm11[6,7]
14090 ; AVX2-FCP-NEXT: vbroadcastss 1648(%rdi), %ymm11
14091 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1,2],xmm14[3]
14092 ; AVX2-FCP-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
14093 ; AVX2-FCP-NEXT: # ymm14 = mem[2,3,2,3,6,7,6,7]
14094 ; AVX2-FCP-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
14095 ; AVX2-FCP-NEXT: # ymm14 = ymm14[0],mem[1],ymm14[2,3,4],mem[5],ymm14[6,7]
14096 ; AVX2-FCP-NEXT: vextractf128 $1, %ymm14, %xmm14
14097 ; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm11 = xmm14[0,1],xmm11[2,3]
14098 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1,2,3],ymm4[4,5,6,7]
14099 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14100 ; AVX2-FCP-NEXT: vmovaps %ymm11, 192(%rsi)
14101 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14102 ; AVX2-FCP-NEXT: vmovaps %ymm11, 128(%rsi)
14103 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14104 ; AVX2-FCP-NEXT: vmovaps %ymm11, 64(%rsi)
14105 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14106 ; AVX2-FCP-NEXT: vmovaps %ymm11, (%rsi)
14107 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14108 ; AVX2-FCP-NEXT: vmovaps %ymm11, 224(%rsi)
14109 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14110 ; AVX2-FCP-NEXT: vmovaps %ymm11, 160(%rsi)
14111 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14112 ; AVX2-FCP-NEXT: vmovaps %ymm11, 96(%rsi)
14113 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14114 ; AVX2-FCP-NEXT: vmovaps %ymm11, 32(%rsi)
14115 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14116 ; AVX2-FCP-NEXT: vmovaps %ymm9, 192(%rdx)
14117 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14118 ; AVX2-FCP-NEXT: vmovaps %ymm9, 128(%rdx)
14119 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14120 ; AVX2-FCP-NEXT: vmovaps %ymm9, 64(%rdx)
14121 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14122 ; AVX2-FCP-NEXT: vmovaps %ymm9, (%rdx)
14123 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14124 ; AVX2-FCP-NEXT: vmovaps %ymm9, 224(%rdx)
14125 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14126 ; AVX2-FCP-NEXT: vmovaps %ymm11, 160(%rdx)
14127 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14128 ; AVX2-FCP-NEXT: vmovaps %ymm11, 96(%rdx)
14129 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
14130 ; AVX2-FCP-NEXT: vmovaps %ymm11, 32(%rdx)
14131 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14132 ; AVX2-FCP-NEXT: vmovaps %ymm9, 192(%rcx)
14133 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14134 ; AVX2-FCP-NEXT: vmovaps %ymm9, 128(%rcx)
14135 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14136 ; AVX2-FCP-NEXT: vmovaps %ymm9, 64(%rcx)
14137 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14138 ; AVX2-FCP-NEXT: vmovaps %ymm9, (%rcx)
14139 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14140 ; AVX2-FCP-NEXT: vmovaps %ymm9, 224(%rcx)
14141 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14142 ; AVX2-FCP-NEXT: vmovaps %ymm9, 160(%rcx)
14143 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14144 ; AVX2-FCP-NEXT: vmovaps %ymm9, 96(%rcx)
14145 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14146 ; AVX2-FCP-NEXT: vmovaps %ymm9, 32(%rcx)
14147 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14148 ; AVX2-FCP-NEXT: vmovaps %ymm9, (%r8)
14149 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14150 ; AVX2-FCP-NEXT: vmovaps %ymm9, 64(%r8)
14151 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14152 ; AVX2-FCP-NEXT: vmovaps %ymm9, 128(%r8)
14153 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14154 ; AVX2-FCP-NEXT: vmovaps %ymm9, 192(%r8)
14155 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14156 ; AVX2-FCP-NEXT: vmovaps %ymm9, 224(%r8)
14157 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14158 ; AVX2-FCP-NEXT: vmovaps %ymm9, 160(%r8)
14159 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14160 ; AVX2-FCP-NEXT: vmovaps %ymm9, 96(%r8)
14161 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14162 ; AVX2-FCP-NEXT: vmovaps %ymm9, 32(%r8)
14163 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14164 ; AVX2-FCP-NEXT: vmovaps %ymm9, 224(%r9)
14165 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14166 ; AVX2-FCP-NEXT: vmovaps %ymm9, 192(%r9)
14167 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14168 ; AVX2-FCP-NEXT: vmovaps %ymm9, 160(%r9)
14169 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14170 ; AVX2-FCP-NEXT: vmovaps %ymm9, 128(%r9)
14171 ; AVX2-FCP-NEXT: vmovups (%rsp), %ymm9 # 32-byte Reload
14172 ; AVX2-FCP-NEXT: vmovaps %ymm9, 96(%r9)
14173 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14174 ; AVX2-FCP-NEXT: vmovaps %ymm9, 64(%r9)
14175 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14176 ; AVX2-FCP-NEXT: vmovaps %ymm9, 32(%r9)
14177 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
14178 ; AVX2-FCP-NEXT: vmovaps %ymm9, (%r9)
14179 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
14180 ; AVX2-FCP-NEXT: vmovaps %ymm12, 224(%rax)
14181 ; AVX2-FCP-NEXT: vmovaps %ymm10, 192(%rax)
14182 ; AVX2-FCP-NEXT: vmovaps %ymm5, 160(%rax)
14183 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
14184 ; AVX2-FCP-NEXT: vmovaps %ymm5, 128(%rax)
14185 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
14186 ; AVX2-FCP-NEXT: vmovaps %ymm5, 96(%rax)
14187 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
14188 ; AVX2-FCP-NEXT: vmovaps %ymm5, 64(%rax)
14189 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
14190 ; AVX2-FCP-NEXT: vmovaps %ymm5, 32(%rax)
14191 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
14192 ; AVX2-FCP-NEXT: vmovaps %ymm5, (%rax)
14193 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
14194 ; AVX2-FCP-NEXT: vmovaps %ymm4, 224(%rax)
14195 ; AVX2-FCP-NEXT: vmovaps %ymm2, 192(%rax)
14196 ; AVX2-FCP-NEXT: vmovaps %ymm1, 160(%rax)
14197 ; AVX2-FCP-NEXT: vmovaps %ymm0, 128(%rax)
14198 ; AVX2-FCP-NEXT: vmovaps %ymm3, 96(%rax)
14199 ; AVX2-FCP-NEXT: vmovaps %ymm8, 64(%rax)
14200 ; AVX2-FCP-NEXT: vmovaps %ymm13, 32(%rax)
14201 ; AVX2-FCP-NEXT: vmovaps %ymm7, (%rax)
14202 ; AVX2-FCP-NEXT: addq $2648, %rsp # imm = 0xA58
14203 ; AVX2-FCP-NEXT: vzeroupper
14204 ; AVX2-FCP-NEXT: retq
14205 ;
14206 ; AVX512-LABEL: load_i32_stride7_vf64:
14207 ; AVX512:       # %bb.0:
14208 ; AVX512-NEXT: subq $3400, %rsp # imm = 0xD48
14209 ; AVX512-NEXT: vmovdqa64 1728(%rdi), %zmm2
14210 ; AVX512-NEXT: vmovdqa64 1664(%rdi), %zmm17
14211 ; AVX512-NEXT: vmovdqa64 1600(%rdi), %zmm11
14212 ; AVX512-NEXT: vmovdqa64 1280(%rdi), %zmm7
14213 ; AVX512-NEXT: vmovdqa64 1216(%rdi), %zmm5
14214 ; AVX512-NEXT: vmovdqa64 1152(%rdi), %zmm12
14215 ; AVX512-NEXT: vmovdqa64 832(%rdi), %zmm6
14216 ; AVX512-NEXT: vmovdqa64 768(%rdi), %zmm8
14217 ; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm13
14218 ; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm20
14219 ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm4
14220 ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm14
14221 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
14222 ; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
14223 ; AVX512-NEXT: vmovdqa64 %zmm14, %zmm3
14224 ; AVX512-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
14225 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
14226 ; AVX512-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14227 ; AVX512-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14228 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14229 ; AVX512-NEXT: vmovdqa64 %zmm13, %zmm3
14230 ; AVX512-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
14231 ; AVX512-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14232 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14233 ; AVX512-NEXT: vmovdqa64 %zmm12, %zmm3
14234 ; AVX512-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
14235 ; AVX512-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14236 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14237 ; AVX512-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
14238 ; AVX512-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14239 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14240 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
14241 ; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
14242 ; AVX512-NEXT: vmovdqa64 %zmm12, %zmm3
14243 ; AVX512-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
14244 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
14245 ; AVX512-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14246 ; AVX512-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14247 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14248 ; AVX512-NEXT: vmovdqa64 %zmm13, %zmm3
14249 ; AVX512-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
14250 ; AVX512-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14251 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14252 ; AVX512-NEXT: vmovdqa64 %zmm14, %zmm3
14253 ; AVX512-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
14254 ; AVX512-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14255 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14256 ; AVX512-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
14257 ; AVX512-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14258 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14259 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
14260 ; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
14261 ; AVX512-NEXT: vmovdqa64 %zmm12, %zmm3
14262 ; AVX512-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
14263 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
14264 ; AVX512-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14265 ; AVX512-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14266 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14267 ; AVX512-NEXT: vmovdqa64 %zmm13, %zmm3
14268 ; AVX512-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
14269 ; AVX512-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14270 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14271 ; AVX512-NEXT: vmovdqa64 %zmm14, %zmm3
14272 ; AVX512-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
14273 ; AVX512-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14274 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14275 ; AVX512-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
14276 ; AVX512-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14277 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14278 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
14279 ; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
14280 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm3
14281 ; AVX512-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
14282 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
14283 ; AVX512-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14284 ; AVX512-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14285 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14286 ; AVX512-NEXT: vmovdqa64 %zmm8, %zmm3
14287 ; AVX512-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
14288 ; AVX512-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14289 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14290 ; AVX512-NEXT: vmovdqa64 %zmm4, %zmm3
14291 ; AVX512-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
14292 ; AVX512-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14293 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14294 ; AVX512-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
14295 ; AVX512-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14296 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14297 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
14298 ; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
14299 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm3
14300 ; AVX512-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
14301 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
14302 ; AVX512-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14303 ; AVX512-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14304 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14305 ; AVX512-NEXT: vmovdqa64 %zmm8, %zmm3
14306 ; AVX512-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
14307 ; AVX512-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14308 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14309 ; AVX512-NEXT: vmovdqa64 %zmm4, %zmm3
14310 ; AVX512-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
14311 ; AVX512-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14312 ; AVX512-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14313 ; AVX512-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
14314 ; AVX512-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14315 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14316 ; AVX512-NEXT: vmovdqa64 1024(%rdi), %zmm3
14317 ; AVX512-NEXT: vmovdqa64 1088(%rdi), %zmm15
14318 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
14319 ; AVX512-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
14320 ; AVX512-NEXT: vmovdqa64 %zmm3, %zmm0
14321 ; AVX512-NEXT: vpermt2d %zmm15, %zmm30, %zmm0
14322 ; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14323 ; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm9
14324 ; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm16
14325 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm0
14326 ; AVX512-NEXT: vpermt2d %zmm16, %zmm30, %zmm0
14327 ; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14328 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm0
14329 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm18
14330 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm1
14331 ; AVX512-NEXT: vpermt2d %zmm18, %zmm30, %zmm1
14332 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14333 ; AVX512-NEXT: vmovdqa64 1472(%rdi), %zmm1
14334 ; AVX512-NEXT: vmovdqa64 1536(%rdi), %zmm19
14335 ; AVX512-NEXT: vmovdqa64 %zmm1, %zmm10
14336 ; AVX512-NEXT: vpermt2d %zmm19, %zmm30, %zmm10
14337 ; AVX512-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14338 ; AVX512-NEXT: vmovdqa64 %zmm12, %zmm21
14339 ; AVX512-NEXT: vpermt2d %zmm5, %zmm30, %zmm21
14340 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
14341 ; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
14342 ; AVX512-NEXT: vpermt2d %zmm7, %zmm10, %zmm21
14343 ; AVX512-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14344 ; AVX512-NEXT: vmovdqa64 %zmm13, %zmm21
14345 ; AVX512-NEXT: vpermt2d %zmm8, %zmm30, %zmm21
14346 ; AVX512-NEXT: vpermt2d %zmm6, %zmm10, %zmm21
14347 ; AVX512-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14348 ; AVX512-NEXT: vmovdqa64 %zmm14, %zmm21
14349 ; AVX512-NEXT: vpermt2d %zmm4, %zmm30, %zmm21
14350 ; AVX512-NEXT: vpermt2d %zmm20, %zmm10, %zmm21
14351 ; AVX512-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14352 ; AVX512-NEXT: vpermi2d %zmm17, %zmm11, %zmm30
14353 ; AVX512-NEXT: vpermt2d %zmm2, %zmm10, %zmm30
14354 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
14355 ; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
14356 ; AVX512-NEXT: vpermt2d %zmm8, %zmm10, %zmm13
14357 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
14358 ; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
14359 ; AVX512-NEXT: vpermt2d %zmm6, %zmm8, %zmm13
14360 ; AVX512-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14361 ; AVX512-NEXT: vpermt2d %zmm4, %zmm10, %zmm14
14362 ; AVX512-NEXT: vpermt2d %zmm20, %zmm8, %zmm14
14363 ; AVX512-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14364 ; AVX512-NEXT: vpermt2d %zmm17, %zmm10, %zmm11
14365 ; AVX512-NEXT: vpermt2d %zmm2, %zmm8, %zmm11
14366 ; AVX512-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14367 ; AVX512-NEXT: vpermt2d %zmm5, %zmm10, %zmm12
14368 ; AVX512-NEXT: vpermt2d %zmm7, %zmm8, %zmm12
14369 ; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14370 ; AVX512-NEXT: vmovdqa64 %zmm3, %zmm2
14371 ; AVX512-NEXT: vpermt2d %zmm15, %zmm10, %zmm2
14372 ; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14373 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm2
14374 ; AVX512-NEXT: vpermt2d %zmm16, %zmm10, %zmm2
14375 ; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14376 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm2
14377 ; AVX512-NEXT: vpermt2d %zmm18, %zmm10, %zmm2
14378 ; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14379 ; AVX512-NEXT: vmovdqa64 %zmm1, %zmm2
14380 ; AVX512-NEXT: vpermt2d %zmm19, %zmm10, %zmm2
14381 ; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14382 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
14383 ; AVX512-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
14384 ; AVX512-NEXT: vmovdqa64 %zmm16, %zmm2
14385 ; AVX512-NEXT: vpermt2d %zmm9, %zmm25, %zmm2
14386 ; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14387 ; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
14388 ; AVX512-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
14389 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm2
14390 ; AVX512-NEXT: vpermt2d %zmm16, %zmm27, %zmm2
14391 ; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14392 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
14393 ; AVX512-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
14394 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm2
14395 ; AVX512-NEXT: vpermt2d %zmm16, %zmm28, %zmm2
14396 ; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14397 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
14398 ; AVX512-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
14399 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm2
14400 ; AVX512-NEXT: vpermt2d %zmm16, %zmm31, %zmm2
14401 ; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14402 ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
14403 ; AVX512-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
14404 ; AVX512-NEXT: vpermt2d %zmm9, %zmm2, %zmm16
14405 ; AVX512-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14406 ; AVX512-NEXT: vmovdqa64 %zmm18, %zmm4
14407 ; AVX512-NEXT: vpermt2d %zmm0, %zmm25, %zmm4
14408 ; AVX512-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14409 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm4
14410 ; AVX512-NEXT: vpermt2d %zmm18, %zmm27, %zmm4
14411 ; AVX512-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14412 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm4
14413 ; AVX512-NEXT: vpermt2d %zmm18, %zmm28, %zmm4
14414 ; AVX512-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14415 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm4
14416 ; AVX512-NEXT: vpermt2d %zmm18, %zmm31, %zmm4
14417 ; AVX512-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14418 ; AVX512-NEXT: vpermt2d %zmm0, %zmm2, %zmm18
14419 ; AVX512-NEXT: vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14420 ; AVX512-NEXT: vmovdqa64 %zmm15, %zmm0
14421 ; AVX512-NEXT: vpermt2d %zmm3, %zmm25, %zmm0
14422 ; AVX512-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
14423 ; AVX512-NEXT: vpermi2d %zmm1, %zmm19, %zmm25
14424 ; AVX512-NEXT: vmovdqa64 %zmm3, %zmm0
14425 ; AVX512-NEXT: vpermt2d %zmm15, %zmm27, %zmm0
14426 ; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14427 ; AVX512-NEXT: vpermi2d %zmm19, %zmm1, %zmm27
14428 ; AVX512-NEXT: vmovdqa64 %zmm3, %zmm0
14429 ; AVX512-NEXT: vpermt2d %zmm15, %zmm28, %zmm0
14430 ; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14431 ; AVX512-NEXT: vpermi2d %zmm19, %zmm1, %zmm28
14432 ; AVX512-NEXT: vmovdqa64 %zmm3, %zmm0
14433 ; AVX512-NEXT: vpermt2d %zmm15, %zmm31, %zmm0
14434 ; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14435 ; AVX512-NEXT: vpermi2d %zmm19, %zmm1, %zmm31
14436 ; AVX512-NEXT: vpermt2d %zmm1, %zmm2, %zmm19
14437 ; AVX512-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14438 ; AVX512-NEXT: vpermt2d %zmm3, %zmm2, %zmm15
14439 ; AVX512-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14440 ; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm0
14441 ; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm17
14442 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,7,14,21,28,0,0,0]
14443 ; AVX512-NEXT: vmovdqa64 %zmm17, %zmm22
14444 ; AVX512-NEXT: vpermt2d %zmm0, %zmm2, %zmm22
14445 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,8,15,22,29,0,0,0]
14446 ; AVX512-NEXT: vmovdqa64 %zmm17, %zmm23
14447 ; AVX512-NEXT: vpermt2d %zmm0, %zmm3, %zmm23
14448 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm4 = [18,25,0,7,14,0,0,0]
14449 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm24
14450 ; AVX512-NEXT: vpermt2d %zmm17, %zmm4, %zmm24
14451 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm7 = [19,26,1,8,15,0,0,0]
14452 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm29
14453 ; AVX512-NEXT: vpermt2d %zmm17, %zmm7, %zmm29
14454 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm18 = [4,11,18,25]
14455 ; AVX512-NEXT: vmovdqa64 %zmm17, %zmm1
14456 ; AVX512-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
14457 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14458 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm19 = [5,12,19,26]
14459 ; AVX512-NEXT: vmovdqa64 %zmm17, %zmm1
14460 ; AVX512-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
14461 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14462 ; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm20 = [6,13,20,27]
14463 ; AVX512-NEXT: vpermt2d %zmm0, %zmm20, %zmm17
14464 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm5
14465 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm0
14466 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm13
14467 ; AVX512-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
14468 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm14
14469 ; AVX512-NEXT: vpermt2d %zmm0, %zmm3, %zmm14
14470 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm15
14471 ; AVX512-NEXT: vpermt2d %zmm5, %zmm4, %zmm15
14472 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm16
14473 ; AVX512-NEXT: vpermt2d %zmm5, %zmm7, %zmm16
14474 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm1
14475 ; AVX512-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
14476 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14477 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm1
14478 ; AVX512-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
14479 ; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14480 ; AVX512-NEXT: vpermt2d %zmm0, %zmm20, %zmm5
14481 ; AVX512-NEXT: vmovdqa64 960(%rdi), %zmm9
14482 ; AVX512-NEXT: vmovdqa64 896(%rdi), %zmm6
14483 ; AVX512-NEXT: vmovdqa64 %zmm6, %zmm8
14484 ; AVX512-NEXT: vpermt2d %zmm9, %zmm2, %zmm8
14485 ; AVX512-NEXT: vmovdqa64 1408(%rdi), %zmm0
14486 ; AVX512-NEXT: vmovdqa64 1344(%rdi), %zmm1
14487 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
14488 ; AVX512-NEXT: vmovdqa64 %zmm6, %zmm10
14489 ; AVX512-NEXT: vpermt2d %zmm9, %zmm3, %zmm10
14490 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
14491 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm11
14492 ; AVX512-NEXT: vpermt2d %zmm6, %zmm4, %zmm11
14493 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
14494 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm12
14495 ; AVX512-NEXT: vpermt2d %zmm6, %zmm7, %zmm12
14496 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
14497 ; AVX512-NEXT: vmovdqa64 %zmm6, %zmm21
14498 ; AVX512-NEXT: vpermt2d %zmm9, %zmm18, %zmm21
14499 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm18
14500 ; AVX512-NEXT: vmovdqa64 %zmm6, %zmm26
14501 ; AVX512-NEXT: vpermt2d %zmm9, %zmm19, %zmm26
14502 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
14503 ; AVX512-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
14504 ; AVX512-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
14505 ; AVX512-NEXT: movw $992, %ax # imm = 0x3E0
14506 ; AVX512-NEXT: kmovw %eax, %k1
14507 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14508 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
14509 ; AVX512-NEXT: movb $-32, %al
14510 ; AVX512-NEXT: kmovw %eax, %k2
14511 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14512 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm13 {%k2}
14513 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14514 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
14515 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14516 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm22 {%k2}
14517 ; AVX512-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
14518 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
14519 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14520 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
14521 ; AVX512-NEXT: vmovdqa32 %zmm25, %zmm2 {%k1}
14522 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14523 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
14524 ; AVX512-NEXT: movw $480, %ax # imm = 0x1E0
14525 ; AVX512-NEXT: kmovw %eax, %k2
14526 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14527 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm10 {%k2}
14528 ; AVX512-NEXT: movw $-512, %ax # imm = 0xFE00
14529 ; AVX512-NEXT: kmovw %eax, %k1
14530 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14531 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm10 {%k1}
14532 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14533 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm23 {%k2}
14534 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14535 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
14536 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14537 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
14538 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14539 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm14 {%k1}
14540 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14541 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
14542 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14543 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
14544 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14545 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm11 {%k2}
14546 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14547 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
14548 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14549 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm24 {%k2}
14550 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14551 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm24 {%k1}
14552 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14553 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm15 {%k2}
14554 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14555 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
14556 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14557 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm4 {%k2}
14558 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14559 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
14560 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14561 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm12 {%k2}
14562 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14563 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
14564 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14565 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm29 {%k2}
14566 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14567 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
14568 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14569 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm16 {%k2}
14570 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14571 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
14572 ; AVX512-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
14573 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14574 ; AVX512-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
14575 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14576 ; AVX512-NEXT: vinserti32x4 $0, %xmm21, %zmm0, %zmm0
14577 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
14578 ; AVX512-NEXT: vmovdqa32 %zmm9, %zmm0 {%k1}
14579 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
14580 ; AVX512-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
14581 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
14582 ; AVX512-NEXT: vmovdqa32 %zmm20, %zmm9 {%k1}
14583 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
14584 ; AVX512-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
14585 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
14586 ; AVX512-NEXT: vmovdqa32 %zmm25, %zmm20 {%k1}
14587 ; AVX512-NEXT: vinserti32x4 $0, %xmm18, %zmm28, %zmm18
14588 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
14589 ; AVX512-NEXT: vmovdqa32 %zmm25, %zmm18 {%k1}
14590 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
14591 ; AVX512-NEXT: vinserti32x4 $0, %xmm26, %zmm25, %zmm25
14592 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
14593 ; AVX512-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
14594 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
14595 ; AVX512-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
14596 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
14597 ; AVX512-NEXT: vmovdqa32 %zmm27, %zmm26 {%k1}
14598 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
14599 ; AVX512-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
14600 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
14601 ; AVX512-NEXT: vmovdqa32 %zmm28, %zmm27 {%k1}
14602 ; AVX512-NEXT: vinserti32x4 $0, %xmm19, %zmm31, %zmm19
14603 ; AVX512-NEXT: vmovdqa32 %zmm30, %zmm19 {%k1}
14604 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
14605 ; AVX512-NEXT: vinserti32x4 $0, %xmm17, %zmm28, %zmm17
14606 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
14607 ; AVX512-NEXT: vmovdqa32 %zmm28, %zmm17 {%k1}
14608 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
14609 ; AVX512-NEXT: vinserti32x4 $0, %xmm5, %zmm28, %zmm5
14610 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
14611 ; AVX512-NEXT: vmovdqa32 %zmm28, %zmm5 {%k1}
14612 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
14613 ; AVX512-NEXT: vinserti32x4 $0, %xmm1, %zmm28, %zmm1
14614 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
14615 ; AVX512-NEXT: vmovdqa32 %zmm28, %zmm1 {%k1}
14616 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
14617 ; AVX512-NEXT: vinserti32x4 $0, %xmm6, %zmm28, %zmm6
14618 ; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
14619 ; AVX512-NEXT: vmovdqa32 %zmm28, %zmm6 {%k1}
14620 ; AVX512-NEXT: vmovdqa64 %zmm2, 192(%rsi)
14621 ; AVX512-NEXT: vmovdqa64 %zmm8, 128(%rsi)
14622 ; AVX512-NEXT: vmovdqa64 %zmm22, 64(%rsi)
14623 ; AVX512-NEXT: vmovdqa64 %zmm13, (%rsi)
14624 ; AVX512-NEXT: vmovdqa64 %zmm3, 192(%rdx)
14625 ; AVX512-NEXT: vmovdqa64 %zmm14, (%rdx)
14626 ; AVX512-NEXT: vmovdqa64 %zmm23, 64(%rdx)
14627 ; AVX512-NEXT: vmovdqa64 %zmm10, 128(%rdx)
14628 ; AVX512-NEXT: vmovdqa64 %zmm4, 192(%rcx)
14629 ; AVX512-NEXT: vmovdqa64 %zmm15, (%rcx)
14630 ; AVX512-NEXT: vmovdqa64 %zmm24, 64(%rcx)
14631 ; AVX512-NEXT: vmovdqa64 %zmm11, 128(%rcx)
14632 ; AVX512-NEXT: vmovdqa64 %zmm7, 192(%r8)
14633 ; AVX512-NEXT: vmovdqa64 %zmm16, (%r8)
14634 ; AVX512-NEXT: vmovdqa64 %zmm29, 64(%r8)
14635 ; AVX512-NEXT: vmovdqa64 %zmm12, 128(%r8)
14636 ; AVX512-NEXT: vmovdqa64 %zmm18, 192(%r9)
14637 ; AVX512-NEXT: vmovdqa64 %zmm20, (%r9)
14638 ; AVX512-NEXT: vmovdqa64 %zmm9, 64(%r9)
14639 ; AVX512-NEXT: vmovdqa64 %zmm0, 128(%r9)
14640 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
14641 ; AVX512-NEXT: vmovdqa64 %zmm19, 192(%rax)
14642 ; AVX512-NEXT: vmovdqa64 %zmm27, (%rax)
14643 ; AVX512-NEXT: vmovdqa64 %zmm26, 64(%rax)
14644 ; AVX512-NEXT: vmovdqa64 %zmm25, 128(%rax)
14645 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
14646 ; AVX512-NEXT: vmovdqa64 %zmm6, 128(%rax)
14647 ; AVX512-NEXT: vmovdqa64 %zmm1, 192(%rax)
14648 ; AVX512-NEXT: vmovdqa64 %zmm5, (%rax)
14649 ; AVX512-NEXT: vmovdqa64 %zmm17, 64(%rax)
14650 ; AVX512-NEXT: addq $3400, %rsp # imm = 0xD48
14651 ; AVX512-NEXT: vzeroupper
14652 ; AVX512-NEXT: retq
14654 ; AVX512-FCP-LABEL: load_i32_stride7_vf64:
14655 ; AVX512-FCP: # %bb.0:
14656 ; AVX512-FCP-NEXT: subq $3400, %rsp # imm = 0xD48
14657 ; AVX512-FCP-NEXT: vmovdqa64 1728(%rdi), %zmm2
14658 ; AVX512-FCP-NEXT: vmovdqa64 1664(%rdi), %zmm17
14659 ; AVX512-FCP-NEXT: vmovdqa64 1600(%rdi), %zmm11
14660 ; AVX512-FCP-NEXT: vmovdqa64 1280(%rdi), %zmm7
14661 ; AVX512-FCP-NEXT: vmovdqa64 1216(%rdi), %zmm5
14662 ; AVX512-FCP-NEXT: vmovdqa64 1152(%rdi), %zmm12
14663 ; AVX512-FCP-NEXT: vmovdqa64 832(%rdi), %zmm6
14664 ; AVX512-FCP-NEXT: vmovdqa64 768(%rdi), %zmm8
14665 ; AVX512-FCP-NEXT: vmovdqa64 704(%rdi), %zmm13
14666 ; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm20
14667 ; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm4
14668 ; AVX512-FCP-NEXT: vmovdqa64 256(%rdi), %zmm14
14669 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
14670 ; AVX512-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
14671 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
14672 ; AVX512-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
14673 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
14674 ; AVX512-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14675 ; AVX512-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14676 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14677 ; AVX512-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
14678 ; AVX512-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
14679 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14680 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14681 ; AVX512-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
14682 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
14683 ; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14684 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14685 ; AVX512-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
14686 ; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14687 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14688 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
14689 ; AVX512-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
14690 ; AVX512-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
14691 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
14692 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
14693 ; AVX512-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14694 ; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14695 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14696 ; AVX512-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
14697 ; AVX512-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
14698 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14699 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14700 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
14701 ; AVX512-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
14702 ; AVX512-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14703 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14704 ; AVX512-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
14705 ; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14706 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14707 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
14708 ; AVX512-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
14709 ; AVX512-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
14710 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
14711 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
14712 ; AVX512-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14713 ; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14714 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14715 ; AVX512-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
14716 ; AVX512-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
14717 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14718 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14719 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
14720 ; AVX512-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
14721 ; AVX512-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14722 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14723 ; AVX512-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
14724 ; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14725 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14726 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
14727 ; AVX512-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
14728 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm3
14729 ; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
14730 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
14731 ; AVX512-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14732 ; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14733 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14734 ; AVX512-FCP-NEXT: vmovdqa64 %zmm8, %zmm3
14735 ; AVX512-FCP-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
14736 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14737 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14738 ; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm3
14739 ; AVX512-FCP-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
14740 ; AVX512-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14741 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14742 ; AVX512-FCP-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
14743 ; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14744 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14745 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
14746 ; AVX512-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
14747 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm3
14748 ; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
14749 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
14750 ; AVX512-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
14751 ; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
14752 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14753 ; AVX512-FCP-NEXT: vmovdqa64 %zmm8, %zmm3
14754 ; AVX512-FCP-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
14755 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
14756 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14757 ; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm3
14758 ; AVX512-FCP-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
14759 ; AVX512-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
14760 ; AVX512-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14761 ; AVX512-FCP-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
14762 ; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
14763 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14764 ; AVX512-FCP-NEXT: vmovdqa64 1024(%rdi), %zmm3
14765 ; AVX512-FCP-NEXT: vmovdqa64 1088(%rdi), %zmm15
14766 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
14767 ; AVX512-FCP-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
14768 ; AVX512-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
14769 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm30, %zmm0
14770 ; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14771 ; AVX512-FCP-NEXT: vmovdqa64 576(%rdi), %zmm9
14772 ; AVX512-FCP-NEXT: vmovdqa64 640(%rdi), %zmm16
14773 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm0
14774 ; AVX512-FCP-NEXT: vpermt2d %zmm16, %zmm30, %zmm0
14775 ; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14776 ; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm0
14777 ; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %zmm18
14778 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm1
14779 ; AVX512-FCP-NEXT: vpermt2d %zmm18, %zmm30, %zmm1
14780 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14781 ; AVX512-FCP-NEXT: vmovdqa64 1472(%rdi), %zmm1
14782 ; AVX512-FCP-NEXT: vmovdqa64 1536(%rdi), %zmm19
14783 ; AVX512-FCP-NEXT: vmovdqa64 %zmm1, %zmm10
14784 ; AVX512-FCP-NEXT: vpermt2d %zmm19, %zmm30, %zmm10
14785 ; AVX512-FCP-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14786 ; AVX512-FCP-NEXT: vmovdqa64 %zmm12, %zmm21
14787 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm30, %zmm21
14788 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
14789 ; AVX512-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
14790 ; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm10, %zmm21
14791 ; AVX512-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14792 ; AVX512-FCP-NEXT: vmovdqa64 %zmm13, %zmm21
14793 ; AVX512-FCP-NEXT: vpermt2d %zmm8, %zmm30, %zmm21
14794 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm10, %zmm21
14795 ; AVX512-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14796 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm21
14797 ; AVX512-FCP-NEXT: vpermt2d %zmm4, %zmm30, %zmm21
14798 ; AVX512-FCP-NEXT: vpermt2d %zmm20, %zmm10, %zmm21
14799 ; AVX512-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14800 ; AVX512-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm30
14801 ; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm10, %zmm30
14802 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
14803 ; AVX512-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
14804 ; AVX512-FCP-NEXT: vpermt2d %zmm8, %zmm10, %zmm13
14805 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
14806 ; AVX512-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
14807 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm8, %zmm13
14808 ; AVX512-FCP-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14809 ; AVX512-FCP-NEXT: vpermt2d %zmm4, %zmm10, %zmm14
14810 ; AVX512-FCP-NEXT: vpermt2d %zmm20, %zmm8, %zmm14
14811 ; AVX512-FCP-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14812 ; AVX512-FCP-NEXT: vpermt2d %zmm17, %zmm10, %zmm11
14813 ; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm8, %zmm11
14814 ; AVX512-FCP-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14815 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm10, %zmm12
14816 ; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm8, %zmm12
14817 ; AVX512-FCP-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14818 ; AVX512-FCP-NEXT: vmovdqa64 %zmm3, %zmm2
14819 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm10, %zmm2
14820 ; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14821 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
14822 ; AVX512-FCP-NEXT: vpermt2d %zmm16, %zmm10, %zmm2
14823 ; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14824 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm2
14825 ; AVX512-FCP-NEXT: vpermt2d %zmm18, %zmm10, %zmm2
14826 ; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14827 ; AVX512-FCP-NEXT: vmovdqa64 %zmm1, %zmm2
14828 ; AVX512-FCP-NEXT: vpermt2d %zmm19, %zmm10, %zmm2
14829 ; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14830 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
14831 ; AVX512-FCP-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
14832 ; AVX512-FCP-NEXT: vmovdqa64 %zmm16, %zmm2
14833 ; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm25, %zmm2
14834 ; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14835 ; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
14836 ; AVX512-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
14837 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
14838 ; AVX512-FCP-NEXT: vpermt2d %zmm16, %zmm27, %zmm2
14839 ; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14840 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
14841 ; AVX512-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
14842 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
14843 ; AVX512-FCP-NEXT: vpermt2d %zmm16, %zmm28, %zmm2
14844 ; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14845 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
14846 ; AVX512-FCP-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
14847 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
14848 ; AVX512-FCP-NEXT: vpermt2d %zmm16, %zmm31, %zmm2
14849 ; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14850 ; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
14851 ; AVX512-FCP-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
14852 ; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm2, %zmm16
14853 ; AVX512-FCP-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14854 ; AVX512-FCP-NEXT: vmovdqa64 %zmm18, %zmm4
14855 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm25, %zmm4
14856 ; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14857 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
14858 ; AVX512-FCP-NEXT: vpermt2d %zmm18, %zmm27, %zmm4
14859 ; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14860 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
14861 ; AVX512-FCP-NEXT: vpermt2d %zmm18, %zmm28, %zmm4
14862 ; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14863 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
14864 ; AVX512-FCP-NEXT: vpermt2d %zmm18, %zmm31, %zmm4
14865 ; AVX512-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14866 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm18
14867 ; AVX512-FCP-NEXT: vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14868 ; AVX512-FCP-NEXT: vmovdqa64 %zmm15, %zmm0
14869 ; AVX512-FCP-NEXT: vpermt2d %zmm3, %zmm25, %zmm0
14870 ; AVX512-FCP-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
14871 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm19, %zmm25
14872 ; AVX512-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
14873 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm27, %zmm0
14874 ; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14875 ; AVX512-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm27
14876 ; AVX512-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
14877 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm28, %zmm0
14878 ; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14879 ; AVX512-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm28
14880 ; AVX512-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
14881 ; AVX512-FCP-NEXT: vpermt2d %zmm15, %zmm31, %zmm0
14882 ; AVX512-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14883 ; AVX512-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm31
14884 ; AVX512-FCP-NEXT: vpermt2d %zmm1, %zmm2, %zmm19
14885 ; AVX512-FCP-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14886 ; AVX512-FCP-NEXT: vpermt2d %zmm3, %zmm2, %zmm15
14887 ; AVX512-FCP-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14888 ; AVX512-FCP-NEXT: vmovdqa64 512(%rdi), %zmm0
14889 ; AVX512-FCP-NEXT: vmovdqa64 448(%rdi), %zmm17
14890 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,7,14,21,28,0,0,0]
14891 ; AVX512-FCP-NEXT: vmovdqa64 %zmm17, %zmm22
14892 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm22
14893 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,8,15,22,29,0,0,0]
14894 ; AVX512-FCP-NEXT: vmovdqa64 %zmm17, %zmm23
14895 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm3, %zmm23
14896 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [18,25,0,7,14,0,0,0]
14897 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm24
14898 ; AVX512-FCP-NEXT: vpermt2d %zmm17, %zmm4, %zmm24
14899 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [19,26,1,8,15,0,0,0]
14900 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm29
14901 ; AVX512-FCP-NEXT: vpermt2d %zmm17, %zmm7, %zmm29
14902 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm18 = [4,11,18,25]
14903 ; AVX512-FCP-NEXT: vmovdqa64 %zmm17, %zmm1
14904 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
14905 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14906 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm19 = [5,12,19,26]
14907 ; AVX512-FCP-NEXT: vmovdqa64 %zmm17, %zmm1
14908 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
14909 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14910 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm20 = [6,13,20,27]
14911 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm17
14912 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm5
14913 ; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm0
14914 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm13
14915 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
14916 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm14
14917 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm3, %zmm14
14918 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm15
14919 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm4, %zmm15
14920 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm16
14921 ; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm7, %zmm16
14922 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm1
14923 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
14924 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14925 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm1
14926 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
14927 ; AVX512-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
14928 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm5
14929 ; AVX512-FCP-NEXT: vmovdqa64 960(%rdi), %zmm9
14930 ; AVX512-FCP-NEXT: vmovdqa64 896(%rdi), %zmm6
14931 ; AVX512-FCP-NEXT: vmovdqa64 %zmm6, %zmm8
14932 ; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm2, %zmm8
14933 ; AVX512-FCP-NEXT: vmovdqa64 1408(%rdi), %zmm0
14934 ; AVX512-FCP-NEXT: vmovdqa64 1344(%rdi), %zmm1
14935 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
14936 ; AVX512-FCP-NEXT: vmovdqa64 %zmm6, %zmm10
14937 ; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm3, %zmm10
14938 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
14939 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm11
14940 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm4, %zmm11
14941 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
14942 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm12
14943 ; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm7, %zmm12
14944 ; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
14945 ; AVX512-FCP-NEXT: vmovdqa64 %zmm6, %zmm21
14946 ; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm18, %zmm21
14947 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm18
14948 ; AVX512-FCP-NEXT: vmovdqa64 %zmm6, %zmm26
14949 ; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm19, %zmm26
14950 ; AVX512-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
14951 ; AVX512-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
14952 ; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
14953 ; AVX512-FCP-NEXT: movw $992, %ax # imm = 0x3E0
14954 ; AVX512-FCP-NEXT: kmovw %eax, %k1
14955 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14956 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
14957 ; AVX512-FCP-NEXT: movb $-32, %al
14958 ; AVX512-FCP-NEXT: kmovw %eax, %k2
14959 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14960 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm13 {%k2}
14961 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14962 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
14963 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14964 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm22 {%k2}
14965 ; AVX512-FCP-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
14966 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
14967 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14968 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
14969 ; AVX512-FCP-NEXT: vmovdqa32 %zmm25, %zmm2 {%k1}
14970 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14971 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
14972 ; AVX512-FCP-NEXT: movw $480, %ax # imm = 0x1E0
14973 ; AVX512-FCP-NEXT: kmovw %eax, %k2
14974 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14975 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm10 {%k2}
14976 ; AVX512-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
14977 ; AVX512-FCP-NEXT: kmovw %eax, %k1
14978 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14979 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm10 {%k1}
14980 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14981 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm23 {%k2}
14982 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14983 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
14984 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14985 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
14986 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14987 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm14 {%k1}
14988 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14989 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
14990 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14991 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
14992 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14993 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm11 {%k2}
14994 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14995 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
14996 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14997 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm24 {%k2}
14998 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
14999 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm24 {%k1}
15000 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15001 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm15 {%k2}
15002 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15003 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
15004 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15005 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm4 {%k2}
15006 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15007 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
15008 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15009 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm12 {%k2}
15010 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15011 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
15012 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15013 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm29 {%k2}
15014 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15015 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
15016 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15017 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm16 {%k2}
15018 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15019 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
15020 ; AVX512-FCP-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
15021 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15022 ; AVX512-FCP-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
15023 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15024 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm21, %zmm0, %zmm0
15025 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
15026 ; AVX512-FCP-NEXT: vmovdqa32 %zmm9, %zmm0 {%k1}
15027 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
15028 ; AVX512-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
15029 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
15030 ; AVX512-FCP-NEXT: vmovdqa32 %zmm20, %zmm9 {%k1}
15031 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
15032 ; AVX512-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
15033 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
15034 ; AVX512-FCP-NEXT: vmovdqa32 %zmm25, %zmm20 {%k1}
15035 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm18, %zmm28, %zmm18
15036 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
15037 ; AVX512-FCP-NEXT: vmovdqa32 %zmm25, %zmm18 {%k1}
15038 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
15039 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm26, %zmm25, %zmm25
15040 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
15041 ; AVX512-FCP-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
15042 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
15043 ; AVX512-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
15044 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
15045 ; AVX512-FCP-NEXT: vmovdqa32 %zmm27, %zmm26 {%k1}
15046 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
15047 ; AVX512-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
15048 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15049 ; AVX512-FCP-NEXT: vmovdqa32 %zmm28, %zmm27 {%k1}
15050 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm19, %zmm31, %zmm19
15051 ; AVX512-FCP-NEXT: vmovdqa32 %zmm30, %zmm19 {%k1}
15052 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15053 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm17, %zmm28, %zmm17
15054 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15055 ; AVX512-FCP-NEXT: vmovdqa32 %zmm28, %zmm17 {%k1}
15056 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15057 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm5, %zmm28, %zmm5
15058 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15059 ; AVX512-FCP-NEXT: vmovdqa32 %zmm28, %zmm5 {%k1}
15060 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15061 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm1, %zmm28, %zmm1
15062 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15063 ; AVX512-FCP-NEXT: vmovdqa32 %zmm28, %zmm1 {%k1}
15064 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15065 ; AVX512-FCP-NEXT: vinserti32x4 $0, %xmm6, %zmm28, %zmm6
15066 ; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15067 ; AVX512-FCP-NEXT: vmovdqa32 %zmm28, %zmm6 {%k1}
15068 ; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 192(%rsi)
15069 ; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 128(%rsi)
15070 ; AVX512-FCP-NEXT: vmovdqa64 %zmm22, 64(%rsi)
15071 ; AVX512-FCP-NEXT: vmovdqa64 %zmm13, (%rsi)
15072 ; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 192(%rdx)
15073 ; AVX512-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
15074 ; AVX512-FCP-NEXT: vmovdqa64 %zmm23, 64(%rdx)
15075 ; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 128(%rdx)
15076 ; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 192(%rcx)
15077 ; AVX512-FCP-NEXT: vmovdqa64 %zmm15, (%rcx)
15078 ; AVX512-FCP-NEXT: vmovdqa64 %zmm24, 64(%rcx)
15079 ; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 128(%rcx)
15080 ; AVX512-FCP-NEXT: vmovdqa64 %zmm7, 192(%r8)
15081 ; AVX512-FCP-NEXT: vmovdqa64 %zmm16, (%r8)
15082 ; AVX512-FCP-NEXT: vmovdqa64 %zmm29, 64(%r8)
15083 ; AVX512-FCP-NEXT: vmovdqa64 %zmm12, 128(%r8)
15084 ; AVX512-FCP-NEXT: vmovdqa64 %zmm18, 192(%r9)
15085 ; AVX512-FCP-NEXT: vmovdqa64 %zmm20, (%r9)
15086 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
15087 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
15088 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
15089 ; AVX512-FCP-NEXT: vmovdqa64 %zmm19, 192(%rax)
15090 ; AVX512-FCP-NEXT: vmovdqa64 %zmm27, (%rax)
15091 ; AVX512-FCP-NEXT: vmovdqa64 %zmm26, 64(%rax)
15092 ; AVX512-FCP-NEXT: vmovdqa64 %zmm25, 128(%rax)
15093 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
15094 ; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 128(%rax)
15095 ; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 192(%rax)
15096 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
15097 ; AVX512-FCP-NEXT: vmovdqa64 %zmm17, 64(%rax)
15098 ; AVX512-FCP-NEXT: addq $3400, %rsp # imm = 0xD48
15099 ; AVX512-FCP-NEXT: vzeroupper
15100 ; AVX512-FCP-NEXT: retq
15102 ; AVX512DQ-LABEL: load_i32_stride7_vf64:
15103 ; AVX512DQ: # %bb.0:
15104 ; AVX512DQ-NEXT: subq $3400, %rsp # imm = 0xD48
15105 ; AVX512DQ-NEXT: vmovdqa64 1728(%rdi), %zmm2
15106 ; AVX512DQ-NEXT: vmovdqa64 1664(%rdi), %zmm17
15107 ; AVX512DQ-NEXT: vmovdqa64 1600(%rdi), %zmm11
15108 ; AVX512DQ-NEXT: vmovdqa64 1280(%rdi), %zmm7
15109 ; AVX512DQ-NEXT: vmovdqa64 1216(%rdi), %zmm5
15110 ; AVX512DQ-NEXT: vmovdqa64 1152(%rdi), %zmm12
15111 ; AVX512DQ-NEXT: vmovdqa64 832(%rdi), %zmm6
15112 ; AVX512DQ-NEXT: vmovdqa64 768(%rdi), %zmm8
15113 ; AVX512DQ-NEXT: vmovdqa64 704(%rdi), %zmm13
15114 ; AVX512DQ-NEXT: vmovdqa64 384(%rdi), %zmm20
15115 ; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %zmm4
15116 ; AVX512DQ-NEXT: vmovdqa64 256(%rdi), %zmm14
15117 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
15118 ; AVX512DQ-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
15119 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm3
15120 ; AVX512DQ-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
15121 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
15122 ; AVX512DQ-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15123 ; AVX512DQ-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15124 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15125 ; AVX512DQ-NEXT: vmovdqa64 %zmm13, %zmm3
15126 ; AVX512DQ-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
15127 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15128 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15129 ; AVX512DQ-NEXT: vmovdqa64 %zmm12, %zmm3
15130 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
15131 ; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15132 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15133 ; AVX512DQ-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
15134 ; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15135 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15136 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
15137 ; AVX512DQ-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
15138 ; AVX512DQ-NEXT: vmovdqa64 %zmm12, %zmm3
15139 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
15140 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
15141 ; AVX512DQ-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15142 ; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15143 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15144 ; AVX512DQ-NEXT: vmovdqa64 %zmm13, %zmm3
15145 ; AVX512DQ-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
15146 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15147 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15148 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm3
15149 ; AVX512DQ-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
15150 ; AVX512DQ-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15151 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15152 ; AVX512DQ-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
15153 ; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15154 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15155 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
15156 ; AVX512DQ-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
15157 ; AVX512DQ-NEXT: vmovdqa64 %zmm12, %zmm3
15158 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
15159 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
15160 ; AVX512DQ-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15161 ; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15162 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15163 ; AVX512DQ-NEXT: vmovdqa64 %zmm13, %zmm3
15164 ; AVX512DQ-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
15165 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15166 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15167 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm3
15168 ; AVX512DQ-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
15169 ; AVX512DQ-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15170 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15171 ; AVX512DQ-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
15172 ; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15173 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15174 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
15175 ; AVX512DQ-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
15176 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm3
15177 ; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
15178 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
15179 ; AVX512DQ-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15180 ; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15181 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15182 ; AVX512DQ-NEXT: vmovdqa64 %zmm8, %zmm3
15183 ; AVX512DQ-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
15184 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15185 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15186 ; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm3
15187 ; AVX512DQ-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
15188 ; AVX512DQ-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15189 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15190 ; AVX512DQ-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
15191 ; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15192 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15193 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
15194 ; AVX512DQ-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
15195 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm3
15196 ; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
15197 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
15198 ; AVX512DQ-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15199 ; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15200 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15201 ; AVX512DQ-NEXT: vmovdqa64 %zmm8, %zmm3
15202 ; AVX512DQ-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
15203 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15204 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15205 ; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm3
15206 ; AVX512DQ-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
15207 ; AVX512DQ-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15208 ; AVX512DQ-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15209 ; AVX512DQ-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
15210 ; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15211 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15212 ; AVX512DQ-NEXT: vmovdqa64 1024(%rdi), %zmm3
15213 ; AVX512DQ-NEXT: vmovdqa64 1088(%rdi), %zmm15
15214 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
15215 ; AVX512DQ-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
15216 ; AVX512DQ-NEXT: vmovdqa64 %zmm3, %zmm0
15217 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm30, %zmm0
15218 ; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15219 ; AVX512DQ-NEXT: vmovdqa64 576(%rdi), %zmm9
15220 ; AVX512DQ-NEXT: vmovdqa64 640(%rdi), %zmm16
15221 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm0
15222 ; AVX512DQ-NEXT: vpermt2d %zmm16, %zmm30, %zmm0
15223 ; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15224 ; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm0
15225 ; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %zmm18
15226 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm1
15227 ; AVX512DQ-NEXT: vpermt2d %zmm18, %zmm30, %zmm1
15228 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15229 ; AVX512DQ-NEXT: vmovdqa64 1472(%rdi), %zmm1
15230 ; AVX512DQ-NEXT: vmovdqa64 1536(%rdi), %zmm19
15231 ; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm10
15232 ; AVX512DQ-NEXT: vpermt2d %zmm19, %zmm30, %zmm10
15233 ; AVX512DQ-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15234 ; AVX512DQ-NEXT: vmovdqa64 %zmm12, %zmm21
15235 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm30, %zmm21
15236 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
15237 ; AVX512DQ-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
15238 ; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm10, %zmm21
15239 ; AVX512DQ-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15240 ; AVX512DQ-NEXT: vmovdqa64 %zmm13, %zmm21
15241 ; AVX512DQ-NEXT: vpermt2d %zmm8, %zmm30, %zmm21
15242 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm10, %zmm21
15243 ; AVX512DQ-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15244 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm21
15245 ; AVX512DQ-NEXT: vpermt2d %zmm4, %zmm30, %zmm21
15246 ; AVX512DQ-NEXT: vpermt2d %zmm20, %zmm10, %zmm21
15247 ; AVX512DQ-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15248 ; AVX512DQ-NEXT: vpermi2d %zmm17, %zmm11, %zmm30
15249 ; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm10, %zmm30
15250 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
15251 ; AVX512DQ-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
15252 ; AVX512DQ-NEXT: vpermt2d %zmm8, %zmm10, %zmm13
15253 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
15254 ; AVX512DQ-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
15255 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm8, %zmm13
15256 ; AVX512DQ-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15257 ; AVX512DQ-NEXT: vpermt2d %zmm4, %zmm10, %zmm14
15258 ; AVX512DQ-NEXT: vpermt2d %zmm20, %zmm8, %zmm14
15259 ; AVX512DQ-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15260 ; AVX512DQ-NEXT: vpermt2d %zmm17, %zmm10, %zmm11
15261 ; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm8, %zmm11
15262 ; AVX512DQ-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15263 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm10, %zmm12
15264 ; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm8, %zmm12
15265 ; AVX512DQ-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15266 ; AVX512DQ-NEXT: vmovdqa64 %zmm3, %zmm2
15267 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm10, %zmm2
15268 ; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15269 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm2
15270 ; AVX512DQ-NEXT: vpermt2d %zmm16, %zmm10, %zmm2
15271 ; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15272 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm2
15273 ; AVX512DQ-NEXT: vpermt2d %zmm18, %zmm10, %zmm2
15274 ; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15275 ; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm2
15276 ; AVX512DQ-NEXT: vpermt2d %zmm19, %zmm10, %zmm2
15277 ; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15278 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
15279 ; AVX512DQ-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
15280 ; AVX512DQ-NEXT: vmovdqa64 %zmm16, %zmm2
15281 ; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm25, %zmm2
15282 ; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15283 ; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
15284 ; AVX512DQ-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
15285 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm2
15286 ; AVX512DQ-NEXT: vpermt2d %zmm16, %zmm27, %zmm2
15287 ; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15288 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
15289 ; AVX512DQ-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
15290 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm2
15291 ; AVX512DQ-NEXT: vpermt2d %zmm16, %zmm28, %zmm2
15292 ; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15293 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
15294 ; AVX512DQ-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
15295 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm2
15296 ; AVX512DQ-NEXT: vpermt2d %zmm16, %zmm31, %zmm2
15297 ; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15298 ; AVX512DQ-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
15299 ; AVX512DQ-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
15300 ; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm2, %zmm16
15301 ; AVX512DQ-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15302 ; AVX512DQ-NEXT: vmovdqa64 %zmm18, %zmm4
15303 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm25, %zmm4
15304 ; AVX512DQ-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15305 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm4
15306 ; AVX512DQ-NEXT: vpermt2d %zmm18, %zmm27, %zmm4
15307 ; AVX512DQ-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15308 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm4
15309 ; AVX512DQ-NEXT: vpermt2d %zmm18, %zmm28, %zmm4
15310 ; AVX512DQ-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15311 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm4
15312 ; AVX512DQ-NEXT: vpermt2d %zmm18, %zmm31, %zmm4
15313 ; AVX512DQ-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15314 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm2, %zmm18
15315 ; AVX512DQ-NEXT: vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15316 ; AVX512DQ-NEXT: vmovdqa64 %zmm15, %zmm0
15317 ; AVX512DQ-NEXT: vpermt2d %zmm3, %zmm25, %zmm0
15318 ; AVX512DQ-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
15319 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm19, %zmm25
15320 ; AVX512DQ-NEXT: vmovdqa64 %zmm3, %zmm0
15321 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm27, %zmm0
15322 ; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15323 ; AVX512DQ-NEXT: vpermi2d %zmm19, %zmm1, %zmm27
15324 ; AVX512DQ-NEXT: vmovdqa64 %zmm3, %zmm0
15325 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm28, %zmm0
15326 ; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15327 ; AVX512DQ-NEXT: vpermi2d %zmm19, %zmm1, %zmm28
15328 ; AVX512DQ-NEXT: vmovdqa64 %zmm3, %zmm0
15329 ; AVX512DQ-NEXT: vpermt2d %zmm15, %zmm31, %zmm0
15330 ; AVX512DQ-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15331 ; AVX512DQ-NEXT: vpermi2d %zmm19, %zmm1, %zmm31
15332 ; AVX512DQ-NEXT: vpermt2d %zmm1, %zmm2, %zmm19
15333 ; AVX512DQ-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15334 ; AVX512DQ-NEXT: vpermt2d %zmm3, %zmm2, %zmm15
15335 ; AVX512DQ-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15336 ; AVX512DQ-NEXT: vmovdqa64 512(%rdi), %zmm0
15337 ; AVX512DQ-NEXT: vmovdqa64 448(%rdi), %zmm17
15338 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,7,14,21,28,0,0,0]
15339 ; AVX512DQ-NEXT: vmovdqa64 %zmm17, %zmm22
15340 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm2, %zmm22
15341 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,8,15,22,29,0,0,0]
15342 ; AVX512DQ-NEXT: vmovdqa64 %zmm17, %zmm23
15343 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm3, %zmm23
15344 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm4 = [18,25,0,7,14,0,0,0]
15345 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm24
15346 ; AVX512DQ-NEXT: vpermt2d %zmm17, %zmm4, %zmm24
15347 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm7 = [19,26,1,8,15,0,0,0]
15348 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm29
15349 ; AVX512DQ-NEXT: vpermt2d %zmm17, %zmm7, %zmm29
15350 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm18 = [4,11,18,25]
15351 ; AVX512DQ-NEXT: vmovdqa64 %zmm17, %zmm1
15352 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
15353 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15354 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm19 = [5,12,19,26]
15355 ; AVX512DQ-NEXT: vmovdqa64 %zmm17, %zmm1
15356 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
15357 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15358 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm20 = [6,13,20,27]
15359 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm20, %zmm17
15360 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm5
15361 ; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm0
15362 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm13
15363 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
15364 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm14
15365 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm3, %zmm14
15366 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm15
15367 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm4, %zmm15
15368 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm16
15369 ; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm7, %zmm16
15370 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm1
15371 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
15372 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15373 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm1
15374 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
15375 ; AVX512DQ-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15376 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm20, %zmm5
15377 ; AVX512DQ-NEXT: vmovdqa64 960(%rdi), %zmm9
15378 ; AVX512DQ-NEXT: vmovdqa64 896(%rdi), %zmm6
15379 ; AVX512DQ-NEXT: vmovdqa64 %zmm6, %zmm8
15380 ; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm2, %zmm8
15381 ; AVX512DQ-NEXT: vmovdqa64 1408(%rdi), %zmm0
15382 ; AVX512DQ-NEXT: vmovdqa64 1344(%rdi), %zmm1
15383 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
15384 ; AVX512DQ-NEXT: vmovdqa64 %zmm6, %zmm10
15385 ; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm3, %zmm10
15386 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
15387 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm11
15388 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm4, %zmm11
15389 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
15390 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm12
15391 ; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm7, %zmm12
15392 ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
15393 ; AVX512DQ-NEXT: vmovdqa64 %zmm6, %zmm21
15394 ; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm18, %zmm21
15395 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm18
15396 ; AVX512DQ-NEXT: vmovdqa64 %zmm6, %zmm26
15397 ; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm19, %zmm26
15398 ; AVX512DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
15399 ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
15400 ; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
15401 ; AVX512DQ-NEXT: movw $992, %ax # imm = 0x3E0
15402 ; AVX512DQ-NEXT: kmovw %eax, %k1
15403 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15404 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
15405 ; AVX512DQ-NEXT: movb $-32, %al
15406 ; AVX512DQ-NEXT: kmovw %eax, %k2
15407 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15408 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm13 {%k2}
15409 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15410 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
15411 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15412 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm22 {%k2}
15413 ; AVX512DQ-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
15414 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
15415 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15416 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
15417 ; AVX512DQ-NEXT: vmovdqa32 %zmm25, %zmm2 {%k1}
15418 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15419 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
15420 ; AVX512DQ-NEXT: movw $480, %ax # imm = 0x1E0
15421 ; AVX512DQ-NEXT: kmovw %eax, %k2
15422 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15423 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm10 {%k2}
15424 ; AVX512DQ-NEXT: movw $-512, %ax # imm = 0xFE00
15425 ; AVX512DQ-NEXT: kmovw %eax, %k1
15426 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15427 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm10 {%k1}
15428 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15429 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm23 {%k2}
15430 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15431 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
15432 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15433 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
15434 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15435 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm14 {%k1}
15436 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15437 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
15438 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15439 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
15440 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15441 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm11 {%k2}
15442 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15443 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
15444 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15445 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm24 {%k2}
15446 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15447 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm24 {%k1}
15448 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15449 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm15 {%k2}
15450 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15451 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
15452 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15453 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm4 {%k2}
15454 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15455 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
15456 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15457 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm12 {%k2}
15458 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15459 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
15460 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15461 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm29 {%k2}
15462 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15463 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
15464 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15465 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm16 {%k2}
15466 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15467 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
15468 ; AVX512DQ-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
15469 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15470 ; AVX512DQ-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
15471 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15472 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm21, %zmm0, %zmm0
15473 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
15474 ; AVX512DQ-NEXT: vmovdqa32 %zmm9, %zmm0 {%k1}
15475 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
15476 ; AVX512DQ-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
15477 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
15478 ; AVX512DQ-NEXT: vmovdqa32 %zmm20, %zmm9 {%k1}
15479 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
15480 ; AVX512DQ-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
15481 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
15482 ; AVX512DQ-NEXT: vmovdqa32 %zmm25, %zmm20 {%k1}
15483 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm18, %zmm28, %zmm18
15484 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
15485 ; AVX512DQ-NEXT: vmovdqa32 %zmm25, %zmm18 {%k1}
15486 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
15487 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm26, %zmm25, %zmm25
15488 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
15489 ; AVX512DQ-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
15490 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
15491 ; AVX512DQ-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
15492 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
15493 ; AVX512DQ-NEXT: vmovdqa32 %zmm27, %zmm26 {%k1}
15494 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
15495 ; AVX512DQ-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
15496 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15497 ; AVX512DQ-NEXT: vmovdqa32 %zmm28, %zmm27 {%k1}
15498 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm19, %zmm31, %zmm19
15499 ; AVX512DQ-NEXT: vmovdqa32 %zmm30, %zmm19 {%k1}
15500 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15501 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm17, %zmm28, %zmm17
15502 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15503 ; AVX512DQ-NEXT: vmovdqa32 %zmm28, %zmm17 {%k1}
15504 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15505 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm5, %zmm28, %zmm5
15506 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15507 ; AVX512DQ-NEXT: vmovdqa32 %zmm28, %zmm5 {%k1}
15508 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15509 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm1, %zmm28, %zmm1
15510 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15511 ; AVX512DQ-NEXT: vmovdqa32 %zmm28, %zmm1 {%k1}
15512 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15513 ; AVX512DQ-NEXT: vinserti32x4 $0, %xmm6, %zmm28, %zmm6
15514 ; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15515 ; AVX512DQ-NEXT: vmovdqa32 %zmm28, %zmm6 {%k1}
15516 ; AVX512DQ-NEXT: vmovdqa64 %zmm2, 192(%rsi)
15517 ; AVX512DQ-NEXT: vmovdqa64 %zmm8, 128(%rsi)
15518 ; AVX512DQ-NEXT: vmovdqa64 %zmm22, 64(%rsi)
15519 ; AVX512DQ-NEXT: vmovdqa64 %zmm13, (%rsi)
15520 ; AVX512DQ-NEXT: vmovdqa64 %zmm3, 192(%rdx)
15521 ; AVX512DQ-NEXT: vmovdqa64 %zmm14, (%rdx)
15522 ; AVX512DQ-NEXT: vmovdqa64 %zmm23, 64(%rdx)
15523 ; AVX512DQ-NEXT: vmovdqa64 %zmm10, 128(%rdx)
15524 ; AVX512DQ-NEXT: vmovdqa64 %zmm4, 192(%rcx)
15525 ; AVX512DQ-NEXT: vmovdqa64 %zmm15, (%rcx)
15526 ; AVX512DQ-NEXT: vmovdqa64 %zmm24, 64(%rcx)
15527 ; AVX512DQ-NEXT: vmovdqa64 %zmm11, 128(%rcx)
15528 ; AVX512DQ-NEXT: vmovdqa64 %zmm7, 192(%r8)
15529 ; AVX512DQ-NEXT: vmovdqa64 %zmm16, (%r8)
15530 ; AVX512DQ-NEXT: vmovdqa64 %zmm29, 64(%r8)
15531 ; AVX512DQ-NEXT: vmovdqa64 %zmm12, 128(%r8)
15532 ; AVX512DQ-NEXT: vmovdqa64 %zmm18, 192(%r9)
15533 ; AVX512DQ-NEXT: vmovdqa64 %zmm20, (%r9)
15534 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, 64(%r9)
15535 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, 128(%r9)
15536 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
15537 ; AVX512DQ-NEXT: vmovdqa64 %zmm19, 192(%rax)
15538 ; AVX512DQ-NEXT: vmovdqa64 %zmm27, (%rax)
15539 ; AVX512DQ-NEXT: vmovdqa64 %zmm26, 64(%rax)
15540 ; AVX512DQ-NEXT: vmovdqa64 %zmm25, 128(%rax)
15541 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
15542 ; AVX512DQ-NEXT: vmovdqa64 %zmm6, 128(%rax)
15543 ; AVX512DQ-NEXT: vmovdqa64 %zmm1, 192(%rax)
15544 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, (%rax)
15545 ; AVX512DQ-NEXT: vmovdqa64 %zmm17, 64(%rax)
15546 ; AVX512DQ-NEXT: addq $3400, %rsp # imm = 0xD48
15547 ; AVX512DQ-NEXT: vzeroupper
15548 ; AVX512DQ-NEXT: retq
15550 ; AVX512DQ-FCP-LABEL: load_i32_stride7_vf64:
15551 ; AVX512DQ-FCP: # %bb.0:
15552 ; AVX512DQ-FCP-NEXT: subq $3400, %rsp # imm = 0xD48
15553 ; AVX512DQ-FCP-NEXT: vmovdqa64 1728(%rdi), %zmm2
15554 ; AVX512DQ-FCP-NEXT: vmovdqa64 1664(%rdi), %zmm17
15555 ; AVX512DQ-FCP-NEXT: vmovdqa64 1600(%rdi), %zmm11
15556 ; AVX512DQ-FCP-NEXT: vmovdqa64 1280(%rdi), %zmm7
15557 ; AVX512DQ-FCP-NEXT: vmovdqa64 1216(%rdi), %zmm5
15558 ; AVX512DQ-FCP-NEXT: vmovdqa64 1152(%rdi), %zmm12
15559 ; AVX512DQ-FCP-NEXT: vmovdqa64 832(%rdi), %zmm6
15560 ; AVX512DQ-FCP-NEXT: vmovdqa64 768(%rdi), %zmm8
15561 ; AVX512DQ-FCP-NEXT: vmovdqa64 704(%rdi), %zmm13
15562 ; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm20
15563 ; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm4
15564 ; AVX512DQ-FCP-NEXT: vmovdqa64 256(%rdi), %zmm14
15565 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
15566 ; AVX512DQ-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
15567 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
15568 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
15569 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
15570 ; AVX512DQ-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15571 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15572 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15573 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
15574 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
15575 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15576 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15577 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
15578 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
15579 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15580 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15581 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
15582 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15583 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15584 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
15585 ; AVX512DQ-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
15586 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
15587 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
15588 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
15589 ; AVX512DQ-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15590 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15591 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15592 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
15593 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
15594 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15595 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15596 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
15597 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
15598 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15599 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15600 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
15601 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15602 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15603 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
15604 ; AVX512DQ-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
15605 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
15606 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
15607 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
15608 ; AVX512DQ-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15609 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15610 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15611 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
15612 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
15613 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15614 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15615 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
15616 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
15617 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15618 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15619 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
15620 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15621 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15622 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
15623 ; AVX512DQ-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
15624 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm3
15625 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
15626 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
15627 ; AVX512DQ-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15628 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15629 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15630 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, %zmm3
15631 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
15632 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15633 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15634 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm3
15635 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
15636 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15637 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15638 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
15639 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15640 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15641 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
15642 ; AVX512DQ-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
15643 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm3
15644 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
15645 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
15646 ; AVX512DQ-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
15647 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
15648 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15649 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, %zmm3
15650 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
15651 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
15652 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15653 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm3
15654 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
15655 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
15656 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15657 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
15658 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
15659 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15660 ; AVX512DQ-FCP-NEXT: vmovdqa64 1024(%rdi), %zmm3
15661 ; AVX512DQ-FCP-NEXT: vmovdqa64 1088(%rdi), %zmm15
15662 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
15663 ; AVX512DQ-FCP-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
15664 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
15665 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm30, %zmm0
15666 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15667 ; AVX512DQ-FCP-NEXT: vmovdqa64 576(%rdi), %zmm9
15668 ; AVX512DQ-FCP-NEXT: vmovdqa64 640(%rdi), %zmm16
15669 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm0
15670 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm16, %zmm30, %zmm0
15671 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15672 ; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm0
15673 ; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %zmm18
15674 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm1
15675 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm18, %zmm30, %zmm1
15676 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15677 ; AVX512DQ-FCP-NEXT: vmovdqa64 1472(%rdi), %zmm1
15678 ; AVX512DQ-FCP-NEXT: vmovdqa64 1536(%rdi), %zmm19
15679 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, %zmm10
15680 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm19, %zmm30, %zmm10
15681 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15682 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, %zmm21
15683 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm30, %zmm21
15684 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
15685 ; AVX512DQ-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
15686 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm10, %zmm21
15687 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15688 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, %zmm21
15689 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm8, %zmm30, %zmm21
15690 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm10, %zmm21
15691 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15692 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm21
15693 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm4, %zmm30, %zmm21
15694 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm20, %zmm10, %zmm21
15695 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15696 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm30
15697 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm10, %zmm30
15698 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
15699 ; AVX512DQ-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
15700 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm8, %zmm10, %zmm13
15701 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
15702 ; AVX512DQ-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
15703 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm8, %zmm13
15704 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15705 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm4, %zmm10, %zmm14
15706 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm20, %zmm8, %zmm14
15707 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15708 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm17, %zmm10, %zmm11
15709 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm8, %zmm11
15710 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15711 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm10, %zmm12
15712 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm8, %zmm12
15713 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15714 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, %zmm2
15715 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm10, %zmm2
15716 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15717 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
15718 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm16, %zmm10, %zmm2
15719 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15720 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm2
15721 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm18, %zmm10, %zmm2
15722 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15723 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, %zmm2
15724 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm19, %zmm10, %zmm2
15725 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15726 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
15727 ; AVX512DQ-FCP-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
15728 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, %zmm2
15729 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm25, %zmm2
15730 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15731 ; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
15732 ; AVX512DQ-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
15733 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
15734 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm16, %zmm27, %zmm2
15735 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15736 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
15737 ; AVX512DQ-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
15738 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
15739 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm16, %zmm28, %zmm2
15740 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15741 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
15742 ; AVX512DQ-FCP-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
15743 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
15744 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm16, %zmm31, %zmm2
15745 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15746 ; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
15747 ; AVX512DQ-FCP-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
15748 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm2, %zmm16
15749 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15750 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm18, %zmm4
15751 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm25, %zmm4
15752 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15753 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
15754 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm18, %zmm27, %zmm4
15755 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15756 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
15757 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm18, %zmm28, %zmm4
15758 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15759 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
15760 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm18, %zmm31, %zmm4
15761 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15762 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm18
15763 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15764 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, %zmm0
15765 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm3, %zmm25, %zmm0
15766 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
15767 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm19, %zmm25
15768 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
15769 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm27, %zmm0
15770 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15771 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm27
15772 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
15773 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm28, %zmm0
15774 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15775 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm28
15776 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
15777 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm15, %zmm31, %zmm0
15778 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15779 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm31
15780 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm1, %zmm2, %zmm19
15781 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15782 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm3, %zmm2, %zmm15
15783 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15784 ; AVX512DQ-FCP-NEXT: vmovdqa64 512(%rdi), %zmm0
15785 ; AVX512DQ-FCP-NEXT: vmovdqa64 448(%rdi), %zmm17
15786 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,7,14,21,28,0,0,0]
15787 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, %zmm22
15788 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm22
15789 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,8,15,22,29,0,0,0]
15790 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, %zmm23
15791 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm3, %zmm23
15792 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [18,25,0,7,14,0,0,0]
15793 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm24
15794 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm17, %zmm4, %zmm24
15795 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [19,26,1,8,15,0,0,0]
15796 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm29
15797 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm17, %zmm7, %zmm29
15798 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm18 = [4,11,18,25]
15799 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, %zmm1
15800 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
15801 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15802 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm19 = [5,12,19,26]
15803 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, %zmm1
15804 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
15805 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15806 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm20 = [6,13,20,27]
15807 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm17
15808 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm5
15809 ; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm0
15810 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm13
15811 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
15812 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm14
15813 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm3, %zmm14
15814 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm15
15815 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm4, %zmm15
15816 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm16
15817 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm7, %zmm16
15818 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm1
15819 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
15820 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15821 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm1
15822 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
15823 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
15824 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm5
15825 ; AVX512DQ-FCP-NEXT: vmovdqa64 960(%rdi), %zmm9
15826 ; AVX512DQ-FCP-NEXT: vmovdqa64 896(%rdi), %zmm6
15827 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, %zmm8
15828 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm2, %zmm8
15829 ; AVX512DQ-FCP-NEXT: vmovdqa64 1408(%rdi), %zmm0
15830 ; AVX512DQ-FCP-NEXT: vmovdqa64 1344(%rdi), %zmm1
15831 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
15832 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, %zmm10
15833 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm3, %zmm10
15834 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
15835 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm11
15836 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm4, %zmm11
15837 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
15838 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm12
15839 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm7, %zmm12
15840 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
15841 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, %zmm21
15842 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm18, %zmm21
15843 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm18
15844 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, %zmm26
15845 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm19, %zmm26
15846 ; AVX512DQ-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
15847 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
15848 ; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
15849 ; AVX512DQ-FCP-NEXT: movw $992, %ax # imm = 0x3E0
15850 ; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
15851 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15852 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
15853 ; AVX512DQ-FCP-NEXT: movb $-32, %al
15854 ; AVX512DQ-FCP-NEXT: kmovw %eax, %k2
15855 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15856 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm13 {%k2}
15857 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15858 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
15859 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15860 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm22 {%k2}
15861 ; AVX512DQ-FCP-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
15862 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
15863 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15864 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
15865 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm25, %zmm2 {%k1}
15866 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15867 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
15868 ; AVX512DQ-FCP-NEXT: movw $480, %ax # imm = 0x1E0
15869 ; AVX512DQ-FCP-NEXT: kmovw %eax, %k2
15870 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15871 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm10 {%k2}
15872 ; AVX512DQ-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
15873 ; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
15874 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15875 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm10 {%k1}
15876 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15877 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm23 {%k2}
15878 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15879 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
15880 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15881 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
15882 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15883 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm14 {%k1}
15884 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15885 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
15886 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15887 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
15888 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15889 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm11 {%k2}
15890 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15891 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
15892 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15893 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm24 {%k2}
15894 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15895 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm24 {%k1}
15896 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15897 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm15 {%k2}
15898 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15899 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
15900 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15901 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm4 {%k2}
15902 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15903 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
15904 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15905 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm12 {%k2}
15906 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15907 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
15908 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15909 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm29 {%k2}
15910 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15911 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
15912 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15913 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm16 {%k2}
15914 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15915 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
15916 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
15917 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15918 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
15919 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
15920 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm21, %zmm0, %zmm0
15921 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
15922 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm9, %zmm0 {%k1}
15923 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
15924 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
15925 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
15926 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm20, %zmm9 {%k1}
15927 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
15928 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
15929 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
15930 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm25, %zmm20 {%k1}
15931 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm18, %zmm28, %zmm18
15932 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
15933 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm25, %zmm18 {%k1}
15934 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
15935 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm26, %zmm25, %zmm25
15936 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
15937 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
15938 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
15939 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
15940 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
15941 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm27, %zmm26 {%k1}
15942 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
15943 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
15944 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15945 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm28, %zmm27 {%k1}
15946 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm19, %zmm31, %zmm19
15947 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm30, %zmm19 {%k1}
15948 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15949 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm17, %zmm28, %zmm17
15950 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15951 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm28, %zmm17 {%k1}
15952 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15953 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm5, %zmm28, %zmm5
15954 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15955 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm28, %zmm5 {%k1}
15956 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15957 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm1, %zmm28, %zmm1
15958 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15959 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm28, %zmm1 {%k1}
15960 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15961 ; AVX512DQ-FCP-NEXT: vinserti32x4 $0, %xmm6, %zmm28, %zmm6
15962 ; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
15963 ; AVX512DQ-FCP-NEXT: vmovdqa32 %zmm28, %zmm6 {%k1}
15964 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 192(%rsi)
15965 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 128(%rsi)
15966 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, 64(%rsi)
15967 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, (%rsi)
15968 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 192(%rdx)
15969 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
15970 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm23, 64(%rdx)
15971 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, 128(%rdx)
15972 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 192(%rcx)
15973 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, (%rcx)
15974 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm24, 64(%rcx)
15975 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, 128(%rcx)
15976 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, 192(%r8)
15977 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, (%r8)
15978 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm29, 64(%r8)
15979 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, 128(%r8)
15980 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm18, 192(%r9)
15981 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, (%r9)
15982 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
15983 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
15984 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
15985 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, 192(%rax)
15986 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, (%rax)
15987 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, 64(%rax)
15988 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm25, 128(%rax)
15989 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
15990 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 128(%rax)
15991 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 192(%rax)
15992 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
15993 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, 64(%rax)
15994 ; AVX512DQ-FCP-NEXT: addq $3400, %rsp # imm = 0xD48
15995 ; AVX512DQ-FCP-NEXT: vzeroupper
15996 ; AVX512DQ-FCP-NEXT: retq
15998 ; AVX512BW-LABEL: load_i32_stride7_vf64:
15999 ; AVX512BW: # %bb.0:
16000 ; AVX512BW-NEXT: subq $3400, %rsp # imm = 0xD48
16001 ; AVX512BW-NEXT: vmovdqa64 1728(%rdi), %zmm2
16002 ; AVX512BW-NEXT: vmovdqa64 1664(%rdi), %zmm17
16003 ; AVX512BW-NEXT: vmovdqa64 1600(%rdi), %zmm11
16004 ; AVX512BW-NEXT: vmovdqa64 1280(%rdi), %zmm7
16005 ; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %zmm5
16006 ; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %zmm12
16007 ; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm6
16008 ; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm8
16009 ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm13
16010 ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm20
16011 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm4
16012 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm14
16013 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
16014 ; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
16015 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm3
16016 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
16017 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
16018 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16019 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16020 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16021 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm3
16022 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
16023 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16024 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16025 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm3
16026 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
16027 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16028 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16029 ; AVX512BW-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
16030 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16031 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16032 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
16033 ; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16034 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm3
16035 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
16036 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
16037 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16038 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16039 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16040 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm3
16041 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
16042 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16043 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16044 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm3
16045 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
16046 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16047 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16048 ; AVX512BW-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
16049 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16050 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16051 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
16052 ; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16053 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm3
16054 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
16055 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
16056 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16057 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16058 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16059 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm3
16060 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
16061 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16062 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16063 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm3
16064 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
16065 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16066 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16067 ; AVX512BW-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
16068 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16069 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16070 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
16071 ; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16072 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm3
16073 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
16074 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
16075 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16076 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16077 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16078 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm3
16079 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
16080 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16081 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16082 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm3
16083 ; AVX512BW-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
16084 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16085 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16086 ; AVX512BW-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
16087 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16088 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16089 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
16090 ; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16091 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm3
16092 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
16093 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
16094 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16095 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16096 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16097 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm3
16098 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
16099 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16100 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16101 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm3
16102 ; AVX512BW-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
16103 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16104 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16105 ; AVX512BW-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
16106 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16107 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16108 ; AVX512BW-NEXT: vmovdqa64 1024(%rdi), %zmm3
16109 ; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %zmm15
16110 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
16111 ; AVX512BW-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
16112 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm0
16113 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm30, %zmm0
16114 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16115 ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm9
16116 ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm16
16117 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm0
16118 ; AVX512BW-NEXT: vpermt2d %zmm16, %zmm30, %zmm0
16119 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16120 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm0
16121 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm18
16122 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
16123 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm30, %zmm1
16124 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16125 ; AVX512BW-NEXT: vmovdqa64 1472(%rdi), %zmm1
16126 ; AVX512BW-NEXT: vmovdqa64 1536(%rdi), %zmm19
16127 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm10
16128 ; AVX512BW-NEXT: vpermt2d %zmm19, %zmm30, %zmm10
16129 ; AVX512BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16130 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm21
16131 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm30, %zmm21
16132 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
16133 ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
16134 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm10, %zmm21
16135 ; AVX512BW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16136 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm21
16137 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm30, %zmm21
16138 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm10, %zmm21
16139 ; AVX512BW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16140 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm21
16141 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm30, %zmm21
16142 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm10, %zmm21
16143 ; AVX512BW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16144 ; AVX512BW-NEXT: vpermi2d %zmm17, %zmm11, %zmm30
16145 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm10, %zmm30
16146 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
16147 ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
16148 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm10, %zmm13
16149 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
16150 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
16151 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm8, %zmm13
16152 ; AVX512BW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16153 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm10, %zmm14
16154 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm8, %zmm14
16155 ; AVX512BW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16156 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm10, %zmm11
16157 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm8, %zmm11
16158 ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16159 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm10, %zmm12
16160 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm8, %zmm12
16161 ; AVX512BW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16162 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm2
16163 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm10, %zmm2
16164 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16165 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm2
16166 ; AVX512BW-NEXT: vpermt2d %zmm16, %zmm10, %zmm2
16167 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16168 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2
16169 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm10, %zmm2
16170 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16171 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm2
16172 ; AVX512BW-NEXT: vpermt2d %zmm19, %zmm10, %zmm2
16173 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16174 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
16175 ; AVX512BW-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
16176 ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm2
16177 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm25, %zmm2
16178 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16179 ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
16180 ; AVX512BW-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
16181 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm2
16182 ; AVX512BW-NEXT: vpermt2d %zmm16, %zmm27, %zmm2
16183 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16184 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
16185 ; AVX512BW-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
16186 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm2
16187 ; AVX512BW-NEXT: vpermt2d %zmm16, %zmm28, %zmm2
16188 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16189 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
16190 ; AVX512BW-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
16191 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm2
16192 ; AVX512BW-NEXT: vpermt2d %zmm16, %zmm31, %zmm2
16193 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16194 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
16195 ; AVX512BW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
16196 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm2, %zmm16
16197 ; AVX512BW-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16198 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm4
16199 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm25, %zmm4
16200 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16201 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4
16202 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm27, %zmm4
16203 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16204 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4
16205 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm28, %zmm4
16206 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16207 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4
16208 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm31, %zmm4
16209 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16210 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm18
16211 ; AVX512BW-NEXT: vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16212 ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm0
16213 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm25, %zmm0
16214 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
16215 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm19, %zmm25
16216 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm0
16217 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm27, %zmm0
16218 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16219 ; AVX512BW-NEXT: vpermi2d %zmm19, %zmm1, %zmm27
16220 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm0
16221 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm28, %zmm0
16222 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16223 ; AVX512BW-NEXT: vpermi2d %zmm19, %zmm1, %zmm28
16224 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm0
16225 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm31, %zmm0
16226 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16227 ; AVX512BW-NEXT: vpermi2d %zmm19, %zmm1, %zmm31
16228 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm2, %zmm19
16229 ; AVX512BW-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16230 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm2, %zmm15
16231 ; AVX512BW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16232 ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm0
16233 ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm17
16234 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,7,14,21,28,0,0,0]
16235 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm22
16236 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm22
16237 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,8,15,22,29,0,0,0]
16238 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm23
16239 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm3, %zmm23
16240 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [18,25,0,7,14,0,0,0]
16241 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm24
16242 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm4, %zmm24
16243 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [19,26,1,8,15,0,0,0]
16244 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm29
16245 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm7, %zmm29
16246 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm18 = [4,11,18,25]
16247 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm1
16248 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
16249 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16250 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm19 = [5,12,19,26]
16251 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm1
16252 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
16253 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16254 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} xmm20 = [6,13,20,27]
16255 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm20, %zmm17
16256 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm5
16257 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm0
16258 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm13
16259 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
16260 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm14
16261 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm3, %zmm14
16262 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm15
16263 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm4, %zmm15
16264 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm16
16265 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm7, %zmm16
16266 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm1
16267 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
16268 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16269 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm1
16270 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
16271 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16272 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm20, %zmm5
16273 ; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm9
16274 ; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm6
16275 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm8
16276 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm2, %zmm8
16277 ; AVX512BW-NEXT: vmovdqa64 1408(%rdi), %zmm0
16278 ; AVX512BW-NEXT: vmovdqa64 1344(%rdi), %zmm1
16279 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
16280 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm10
16281 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm3, %zmm10
16282 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
16283 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm11
16284 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm4, %zmm11
16285 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
16286 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm12
16287 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm7, %zmm12
16288 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
16289 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm21
16290 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm18, %zmm21
16291 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm18
16292 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm26
16293 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm19, %zmm26
16294 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
16295 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
16296 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
16297 ; AVX512BW-NEXT: movw $992, %ax # imm = 0x3E0
16298 ; AVX512BW-NEXT: kmovd %eax, %k1
16299 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16300 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
16301 ; AVX512BW-NEXT: movb $-32, %al
16302 ; AVX512BW-NEXT: kmovd %eax, %k2
16303 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16304 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm13 {%k2}
16305 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16306 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
16307 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16308 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm22 {%k2}
16309 ; AVX512BW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
16310 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
16311 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16312 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
16313 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm2 {%k1}
16314 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16315 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
16316 ; AVX512BW-NEXT: movw $480, %ax # imm = 0x1E0
16317 ; AVX512BW-NEXT: kmovd %eax, %k2
16318 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16319 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm10 {%k2}
16320 ; AVX512BW-NEXT: movw $-512, %ax # imm = 0xFE00
16321 ; AVX512BW-NEXT: kmovd %eax, %k1
16322 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16323 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm10 {%k1}
16324 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16325 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm23 {%k2}
16326 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16327 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
16328 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16329 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
16330 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16331 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm14 {%k1}
16332 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16333 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
16334 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16335 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
16336 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16337 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm11 {%k2}
16338 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16339 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
16340 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16341 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm24 {%k2}
16342 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16343 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm24 {%k1}
16344 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16345 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm15 {%k2}
16346 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16347 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
16348 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16349 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm4 {%k2}
16350 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16351 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
16352 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16353 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm12 {%k2}
16354 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16355 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
16356 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16357 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm29 {%k2}
16358 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16359 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
16360 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16361 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm16 {%k2}
16362 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16363 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
16364 ; AVX512BW-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
16365 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16366 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
16367 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16368 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm21, %zmm0, %zmm0
16369 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
16370 ; AVX512BW-NEXT: vmovdqa32 %zmm9, %zmm0 {%k1}
16371 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
16372 ; AVX512BW-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
16373 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
16374 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm9 {%k1}
16375 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
16376 ; AVX512BW-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
16377 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
16378 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm20 {%k1}
16379 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm18, %zmm28, %zmm18
16380 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
16381 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm18 {%k1}
16382 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
16383 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm26, %zmm25, %zmm25
16384 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
16385 ; AVX512BW-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
16386 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
16387 ; AVX512BW-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
16388 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
16389 ; AVX512BW-NEXT: vmovdqa32 %zmm27, %zmm26 {%k1}
16390 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
16391 ; AVX512BW-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
16392 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16393 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm27 {%k1}
16394 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm19, %zmm31, %zmm19
16395 ; AVX512BW-NEXT: vmovdqa32 %zmm30, %zmm19 {%k1}
16396 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16397 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm17, %zmm28, %zmm17
16398 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16399 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm17 {%k1}
16400 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16401 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm5, %zmm28, %zmm5
16402 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16403 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm5 {%k1}
16404 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16405 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm1, %zmm28, %zmm1
16406 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16407 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm1 {%k1}
16408 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16409 ; AVX512BW-NEXT: vinserti32x4 $0, %xmm6, %zmm28, %zmm6
16410 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16411 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm6 {%k1}
16412 ; AVX512BW-NEXT: vmovdqa64 %zmm2, 192(%rsi)
16413 ; AVX512BW-NEXT: vmovdqa64 %zmm8, 128(%rsi)
16414 ; AVX512BW-NEXT: vmovdqa64 %zmm22, 64(%rsi)
16415 ; AVX512BW-NEXT: vmovdqa64 %zmm13, (%rsi)
16416 ; AVX512BW-NEXT: vmovdqa64 %zmm3, 192(%rdx)
16417 ; AVX512BW-NEXT: vmovdqa64 %zmm14, (%rdx)
16418 ; AVX512BW-NEXT: vmovdqa64 %zmm23, 64(%rdx)
16419 ; AVX512BW-NEXT: vmovdqa64 %zmm10, 128(%rdx)
16420 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 192(%rcx)
16421 ; AVX512BW-NEXT: vmovdqa64 %zmm15, (%rcx)
16422 ; AVX512BW-NEXT: vmovdqa64 %zmm24, 64(%rcx)
16423 ; AVX512BW-NEXT: vmovdqa64 %zmm11, 128(%rcx)
16424 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 192(%r8)
16425 ; AVX512BW-NEXT: vmovdqa64 %zmm16, (%r8)
16426 ; AVX512BW-NEXT: vmovdqa64 %zmm29, 64(%r8)
16427 ; AVX512BW-NEXT: vmovdqa64 %zmm12, 128(%r8)
16428 ; AVX512BW-NEXT: vmovdqa64 %zmm18, 192(%r9)
16429 ; AVX512BW-NEXT: vmovdqa64 %zmm20, (%r9)
16430 ; AVX512BW-NEXT: vmovdqa64 %zmm9, 64(%r9)
16431 ; AVX512BW-NEXT: vmovdqa64 %zmm0, 128(%r9)
16432 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
16433 ; AVX512BW-NEXT: vmovdqa64 %zmm19, 192(%rax)
16434 ; AVX512BW-NEXT: vmovdqa64 %zmm27, (%rax)
16435 ; AVX512BW-NEXT: vmovdqa64 %zmm26, 64(%rax)
16436 ; AVX512BW-NEXT: vmovdqa64 %zmm25, 128(%rax)
16437 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
16438 ; AVX512BW-NEXT: vmovdqa64 %zmm6, 128(%rax)
16439 ; AVX512BW-NEXT: vmovdqa64 %zmm1, 192(%rax)
16440 ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rax)
16441 ; AVX512BW-NEXT: vmovdqa64 %zmm17, 64(%rax)
16442 ; AVX512BW-NEXT: addq $3400, %rsp # imm = 0xD48
16443 ; AVX512BW-NEXT: vzeroupper
16444 ; AVX512BW-NEXT: retq
16445 ;
16446 ; AVX512BW-FCP-LABEL: load_i32_stride7_vf64:
16447 ; AVX512BW-FCP: # %bb.0:
16448 ; AVX512BW-FCP-NEXT: subq $3400, %rsp # imm = 0xD48
16449 ; AVX512BW-FCP-NEXT: vmovdqa64 1728(%rdi), %zmm2
16450 ; AVX512BW-FCP-NEXT: vmovdqa64 1664(%rdi), %zmm17
16451 ; AVX512BW-FCP-NEXT: vmovdqa64 1600(%rdi), %zmm11
16452 ; AVX512BW-FCP-NEXT: vmovdqa64 1280(%rdi), %zmm7
16453 ; AVX512BW-FCP-NEXT: vmovdqa64 1216(%rdi), %zmm5
16454 ; AVX512BW-FCP-NEXT: vmovdqa64 1152(%rdi), %zmm12
16455 ; AVX512BW-FCP-NEXT: vmovdqa64 832(%rdi), %zmm6
16456 ; AVX512BW-FCP-NEXT: vmovdqa64 768(%rdi), %zmm8
16457 ; AVX512BW-FCP-NEXT: vmovdqa64 704(%rdi), %zmm13
16458 ; AVX512BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm20
16459 ; AVX512BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm4
16460 ; AVX512BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm14
16461 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
16462 ; AVX512BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
16463 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
16464 ; AVX512BW-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
16465 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
16466 ; AVX512BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16467 ; AVX512BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16468 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16469 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
16470 ; AVX512BW-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
16471 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16472 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16473 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
16474 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
16475 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16476 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16477 ; AVX512BW-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
16478 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16479 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16480 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
16481 ; AVX512BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16482 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
16483 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
16484 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
16485 ; AVX512BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16486 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16487 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16488 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
16489 ; AVX512BW-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
16490 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16491 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16492 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
16493 ; AVX512BW-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
16494 ; AVX512BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16495 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16496 ; AVX512BW-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
16497 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16498 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16499 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
16500 ; AVX512BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16501 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
16502 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
16503 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
16504 ; AVX512BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16505 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16506 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16507 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
16508 ; AVX512BW-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
16509 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16510 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16511 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
16512 ; AVX512BW-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
16513 ; AVX512BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16514 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16515 ; AVX512BW-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
16516 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16517 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16518 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
16519 ; AVX512BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16520 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm3
16521 ; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
16522 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
16523 ; AVX512BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16524 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16525 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16526 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm3
16527 ; AVX512BW-FCP-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
16528 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16529 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16530 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm3
16531 ; AVX512BW-FCP-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
16532 ; AVX512BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16533 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16534 ; AVX512BW-FCP-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
16535 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16536 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16537 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
16538 ; AVX512BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16539 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm3
16540 ; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
16541 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
16542 ; AVX512BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16543 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16544 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16545 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm3
16546 ; AVX512BW-FCP-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
16547 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16548 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16549 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm3
16550 ; AVX512BW-FCP-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
16551 ; AVX512BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16552 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16553 ; AVX512BW-FCP-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
16554 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16555 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16556 ; AVX512BW-FCP-NEXT: vmovdqa64 1024(%rdi), %zmm3
16557 ; AVX512BW-FCP-NEXT: vmovdqa64 1088(%rdi), %zmm15
16558 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
16559 ; AVX512BW-FCP-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
16560 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
16561 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm30, %zmm0
16562 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16563 ; AVX512BW-FCP-NEXT: vmovdqa64 576(%rdi), %zmm9
16564 ; AVX512BW-FCP-NEXT: vmovdqa64 640(%rdi), %zmm16
16565 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm0
16566 ; AVX512BW-FCP-NEXT: vpermt2d %zmm16, %zmm30, %zmm0
16567 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16568 ; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm0
16569 ; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm18
16570 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm1
16571 ; AVX512BW-FCP-NEXT: vpermt2d %zmm18, %zmm30, %zmm1
16572 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16573 ; AVX512BW-FCP-NEXT: vmovdqa64 1472(%rdi), %zmm1
16574 ; AVX512BW-FCP-NEXT: vmovdqa64 1536(%rdi), %zmm19
16575 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm10
16576 ; AVX512BW-FCP-NEXT: vpermt2d %zmm19, %zmm30, %zmm10
16577 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16578 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm21
16579 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm30, %zmm21
16580 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
16581 ; AVX512BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
16582 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm10, %zmm21
16583 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16584 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm21
16585 ; AVX512BW-FCP-NEXT: vpermt2d %zmm8, %zmm30, %zmm21
16586 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm10, %zmm21
16587 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16588 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm21
16589 ; AVX512BW-FCP-NEXT: vpermt2d %zmm4, %zmm30, %zmm21
16590 ; AVX512BW-FCP-NEXT: vpermt2d %zmm20, %zmm10, %zmm21
16591 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16592 ; AVX512BW-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm30
16593 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm10, %zmm30
16594 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
16595 ; AVX512BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
16596 ; AVX512BW-FCP-NEXT: vpermt2d %zmm8, %zmm10, %zmm13
16597 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
16598 ; AVX512BW-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
16599 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm8, %zmm13
16600 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16601 ; AVX512BW-FCP-NEXT: vpermt2d %zmm4, %zmm10, %zmm14
16602 ; AVX512BW-FCP-NEXT: vpermt2d %zmm20, %zmm8, %zmm14
16603 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16604 ; AVX512BW-FCP-NEXT: vpermt2d %zmm17, %zmm10, %zmm11
16605 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm8, %zmm11
16606 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16607 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm10, %zmm12
16608 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm8, %zmm12
16609 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16610 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm2
16611 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm10, %zmm2
16612 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16613 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
16614 ; AVX512BW-FCP-NEXT: vpermt2d %zmm16, %zmm10, %zmm2
16615 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16616 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm2
16617 ; AVX512BW-FCP-NEXT: vpermt2d %zmm18, %zmm10, %zmm2
16618 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16619 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm2
16620 ; AVX512BW-FCP-NEXT: vpermt2d %zmm19, %zmm10, %zmm2
16621 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16622 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
16623 ; AVX512BW-FCP-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
16624 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm16, %zmm2
16625 ; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm25, %zmm2
16626 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16627 ; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
16628 ; AVX512BW-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
16629 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
16630 ; AVX512BW-FCP-NEXT: vpermt2d %zmm16, %zmm27, %zmm2
16631 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16632 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
16633 ; AVX512BW-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
16634 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
16635 ; AVX512BW-FCP-NEXT: vpermt2d %zmm16, %zmm28, %zmm2
16636 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16637 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
16638 ; AVX512BW-FCP-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
16639 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
16640 ; AVX512BW-FCP-NEXT: vpermt2d %zmm16, %zmm31, %zmm2
16641 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16642 ; AVX512BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
16643 ; AVX512BW-FCP-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
16644 ; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm2, %zmm16
16645 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16646 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm18, %zmm4
16647 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm25, %zmm4
16648 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16649 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
16650 ; AVX512BW-FCP-NEXT: vpermt2d %zmm18, %zmm27, %zmm4
16651 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16652 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
16653 ; AVX512BW-FCP-NEXT: vpermt2d %zmm18, %zmm28, %zmm4
16654 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16655 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
16656 ; AVX512BW-FCP-NEXT: vpermt2d %zmm18, %zmm31, %zmm4
16657 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16658 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm18
16659 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16660 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm15, %zmm0
16661 ; AVX512BW-FCP-NEXT: vpermt2d %zmm3, %zmm25, %zmm0
16662 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
16663 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm19, %zmm25
16664 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
16665 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm27, %zmm0
16666 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16667 ; AVX512BW-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm27
16668 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
16669 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm28, %zmm0
16670 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16671 ; AVX512BW-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm28
16672 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
16673 ; AVX512BW-FCP-NEXT: vpermt2d %zmm15, %zmm31, %zmm0
16674 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16675 ; AVX512BW-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm31
16676 ; AVX512BW-FCP-NEXT: vpermt2d %zmm1, %zmm2, %zmm19
16677 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16678 ; AVX512BW-FCP-NEXT: vpermt2d %zmm3, %zmm2, %zmm15
16679 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16680 ; AVX512BW-FCP-NEXT: vmovdqa64 512(%rdi), %zmm0
16681 ; AVX512BW-FCP-NEXT: vmovdqa64 448(%rdi), %zmm17
16682 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,7,14,21,28,0,0,0]
16683 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm22
16684 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm22
16685 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,8,15,22,29,0,0,0]
16686 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm23
16687 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm3, %zmm23
16688 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [18,25,0,7,14,0,0,0]
16689 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm24
16690 ; AVX512BW-FCP-NEXT: vpermt2d %zmm17, %zmm4, %zmm24
16691 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [19,26,1,8,15,0,0,0]
16692 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm29
16693 ; AVX512BW-FCP-NEXT: vpermt2d %zmm17, %zmm7, %zmm29
16694 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm18 = [4,11,18,25]
16695 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm1
16696 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
16697 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16698 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm19 = [5,12,19,26]
16699 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm1
16700 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
16701 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16702 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm20 = [6,13,20,27]
16703 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm17
16704 ; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm5
16705 ; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm0
16706 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm13
16707 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
16708 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm14
16709 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm3, %zmm14
16710 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm15
16711 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm4, %zmm15
16712 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm16
16713 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm7, %zmm16
16714 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm1
16715 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
16716 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16717 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm1
16718 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
16719 ; AVX512BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16720 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm5
16721 ; AVX512BW-FCP-NEXT: vmovdqa64 960(%rdi), %zmm9
16722 ; AVX512BW-FCP-NEXT: vmovdqa64 896(%rdi), %zmm6
16723 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm8
16724 ; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm2, %zmm8
16725 ; AVX512BW-FCP-NEXT: vmovdqa64 1408(%rdi), %zmm0
16726 ; AVX512BW-FCP-NEXT: vmovdqa64 1344(%rdi), %zmm1
16727 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
16728 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm10
16729 ; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm3, %zmm10
16730 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
16731 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm11
16732 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm4, %zmm11
16733 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
16734 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm12
16735 ; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm7, %zmm12
16736 ; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
16737 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm21
16738 ; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm18, %zmm21
16739 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm18
16740 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm26
16741 ; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm19, %zmm26
16742 ; AVX512BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
16743 ; AVX512BW-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
16744 ; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
16745 ; AVX512BW-FCP-NEXT: movw $992, %ax # imm = 0x3E0
16746 ; AVX512BW-FCP-NEXT: kmovd %eax, %k1
16747 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16748 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
16749 ; AVX512BW-FCP-NEXT: movb $-32, %al
16750 ; AVX512BW-FCP-NEXT: kmovd %eax, %k2
16751 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16752 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm13 {%k2}
16753 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16754 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
16755 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16756 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm22 {%k2}
16757 ; AVX512BW-FCP-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
16758 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
16759 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16760 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
16761 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm2 {%k1}
16762 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16763 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
16764 ; AVX512BW-FCP-NEXT: movw $480, %ax # imm = 0x1E0
16765 ; AVX512BW-FCP-NEXT: kmovd %eax, %k2
16766 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16767 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm10 {%k2}
16768 ; AVX512BW-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
16769 ; AVX512BW-FCP-NEXT: kmovd %eax, %k1
16770 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16771 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm10 {%k1}
16772 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16773 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm23 {%k2}
16774 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16775 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
16776 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16777 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
16778 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16779 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm14 {%k1}
16780 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16781 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
16782 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16783 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
16784 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16785 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm11 {%k2}
16786 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16787 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
16788 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16789 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm24 {%k2}
16790 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16791 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm24 {%k1}
16792 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16793 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm15 {%k2}
16794 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16795 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
16796 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16797 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm4 {%k2}
16798 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16799 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
16800 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16801 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm12 {%k2}
16802 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16803 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
16804 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16805 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm29 {%k2}
16806 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16807 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
16808 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16809 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm16 {%k2}
16810 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16811 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
16812 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
16813 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16814 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
16815 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
16816 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm21, %zmm0, %zmm0
16817 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
16818 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm9, %zmm0 {%k1}
16819 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
16820 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
16821 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
16822 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm20, %zmm9 {%k1}
16823 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
16824 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
16825 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
16826 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm20 {%k1}
16827 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm18, %zmm28, %zmm18
16828 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
16829 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm18 {%k1}
16830 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
16831 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm26, %zmm25, %zmm25
16832 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
16833 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
16834 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
16835 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
16836 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
16837 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm27, %zmm26 {%k1}
16838 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
16839 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
16840 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16841 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm27 {%k1}
16842 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm19, %zmm31, %zmm19
16843 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm30, %zmm19 {%k1}
16844 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16845 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm17, %zmm28, %zmm17
16846 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16847 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm17 {%k1}
16848 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16849 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm5, %zmm28, %zmm5
16850 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16851 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm5 {%k1}
16852 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16853 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm1, %zmm28, %zmm1
16854 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16855 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm1 {%k1}
16856 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16857 ; AVX512BW-FCP-NEXT: vinserti32x4 $0, %xmm6, %zmm28, %zmm6
16858 ; AVX512BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
16859 ; AVX512BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm6 {%k1}
16860 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 192(%rsi)
16861 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, 128(%rsi)
16862 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, 64(%rsi)
16863 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, (%rsi)
16864 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 192(%rdx)
16865 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
16866 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm23, 64(%rdx)
16867 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, 128(%rdx)
16868 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, 192(%rcx)
16869 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm15, (%rcx)
16870 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm24, 64(%rcx)
16871 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, 128(%rcx)
16872 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, 192(%r8)
16873 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm16, (%r8)
16874 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm29, 64(%r8)
16875 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, 128(%r8)
16876 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm18, 192(%r9)
16877 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm20, (%r9)
16878 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
16879 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
16880 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
16881 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm19, 192(%rax)
16882 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm27, (%rax)
16883 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm26, 64(%rax)
16884 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm25, 128(%rax)
16885 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
16886 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, 128(%rax)
16887 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 192(%rax)
16888 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
16889 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, 64(%rax)
16890 ; AVX512BW-FCP-NEXT: addq $3400, %rsp # imm = 0xD48
16891 ; AVX512BW-FCP-NEXT: vzeroupper
16892 ; AVX512BW-FCP-NEXT: retq
16893 ;
16894 ; AVX512DQ-BW-LABEL: load_i32_stride7_vf64:
16895 ; AVX512DQ-BW: # %bb.0:
16896 ; AVX512DQ-BW-NEXT: subq $3400, %rsp # imm = 0xD48
16897 ; AVX512DQ-BW-NEXT: vmovdqa64 1728(%rdi), %zmm2
16898 ; AVX512DQ-BW-NEXT: vmovdqa64 1664(%rdi), %zmm17
16899 ; AVX512DQ-BW-NEXT: vmovdqa64 1600(%rdi), %zmm11
16900 ; AVX512DQ-BW-NEXT: vmovdqa64 1280(%rdi), %zmm7
16901 ; AVX512DQ-BW-NEXT: vmovdqa64 1216(%rdi), %zmm5
16902 ; AVX512DQ-BW-NEXT: vmovdqa64 1152(%rdi), %zmm12
16903 ; AVX512DQ-BW-NEXT: vmovdqa64 832(%rdi), %zmm6
16904 ; AVX512DQ-BW-NEXT: vmovdqa64 768(%rdi), %zmm8
16905 ; AVX512DQ-BW-NEXT: vmovdqa64 704(%rdi), %zmm13
16906 ; AVX512DQ-BW-NEXT: vmovdqa64 384(%rdi), %zmm20
16907 ; AVX512DQ-BW-NEXT: vmovdqa64 320(%rdi), %zmm4
16908 ; AVX512DQ-BW-NEXT: vmovdqa64 256(%rdi), %zmm14
16909 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
16910 ; AVX512DQ-BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
16911 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm3
16912 ; AVX512DQ-BW-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
16913 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
16914 ; AVX512DQ-BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16915 ; AVX512DQ-BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16916 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16917 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, %zmm3
16918 ; AVX512DQ-BW-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
16919 ; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16920 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16921 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, %zmm3
16922 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
16923 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16924 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16925 ; AVX512DQ-BW-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
16926 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16927 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16928 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
16929 ; AVX512DQ-BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16930 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, %zmm3
16931 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
16932 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
16933 ; AVX512DQ-BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16934 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16935 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16936 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, %zmm3
16937 ; AVX512DQ-BW-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
16938 ; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16939 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16940 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm3
16941 ; AVX512DQ-BW-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
16942 ; AVX512DQ-BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16943 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16944 ; AVX512DQ-BW-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
16945 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16946 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16947 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
16948 ; AVX512DQ-BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16949 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, %zmm3
16950 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
16951 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
16952 ; AVX512DQ-BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16953 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16954 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16955 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, %zmm3
16956 ; AVX512DQ-BW-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
16957 ; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16958 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16959 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm3
16960 ; AVX512DQ-BW-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
16961 ; AVX512DQ-BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16962 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16963 ; AVX512DQ-BW-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
16964 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16965 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16966 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
16967 ; AVX512DQ-BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16968 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm3
16969 ; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
16970 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
16971 ; AVX512DQ-BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16972 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16973 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16974 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, %zmm3
16975 ; AVX512DQ-BW-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
16976 ; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16977 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16978 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm3
16979 ; AVX512DQ-BW-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
16980 ; AVX512DQ-BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
16981 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16982 ; AVX512DQ-BW-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
16983 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
16984 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16985 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
16986 ; AVX512DQ-BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
16987 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm3
16988 ; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
16989 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
16990 ; AVX512DQ-BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
16991 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
16992 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16993 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, %zmm3
16994 ; AVX512DQ-BW-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
16995 ; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
16996 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
16997 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm3
16998 ; AVX512DQ-BW-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
16999 ; AVX512DQ-BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
17000 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17001 ; AVX512DQ-BW-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
17002 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
17003 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17004 ; AVX512DQ-BW-NEXT: vmovdqa64 1024(%rdi), %zmm3
17005 ; AVX512DQ-BW-NEXT: vmovdqa64 1088(%rdi), %zmm15
17006 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
17007 ; AVX512DQ-BW-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
17008 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, %zmm0
17009 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm30, %zmm0
17010 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17011 ; AVX512DQ-BW-NEXT: vmovdqa64 576(%rdi), %zmm9
17012 ; AVX512DQ-BW-NEXT: vmovdqa64 640(%rdi), %zmm16
17013 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm0
17014 ; AVX512DQ-BW-NEXT: vpermt2d %zmm16, %zmm30, %zmm0
17015 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17016 ; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm0
17017 ; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %zmm18
17018 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm1
17019 ; AVX512DQ-BW-NEXT: vpermt2d %zmm18, %zmm30, %zmm1
17020 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17021 ; AVX512DQ-BW-NEXT: vmovdqa64 1472(%rdi), %zmm1
17022 ; AVX512DQ-BW-NEXT: vmovdqa64 1536(%rdi), %zmm19
17023 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, %zmm10
17024 ; AVX512DQ-BW-NEXT: vpermt2d %zmm19, %zmm30, %zmm10
17025 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17026 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, %zmm21
17027 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm30, %zmm21
17028 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
17029 ; AVX512DQ-BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
17030 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm10, %zmm21
17031 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17032 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, %zmm21
17033 ; AVX512DQ-BW-NEXT: vpermt2d %zmm8, %zmm30, %zmm21
17034 ; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm10, %zmm21
17035 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17036 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm21
17037 ; AVX512DQ-BW-NEXT: vpermt2d %zmm4, %zmm30, %zmm21
17038 ; AVX512DQ-BW-NEXT: vpermt2d %zmm20, %zmm10, %zmm21
17039 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17040 ; AVX512DQ-BW-NEXT: vpermi2d %zmm17, %zmm11, %zmm30
17041 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm10, %zmm30
17042 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
17043 ; AVX512DQ-BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
17044 ; AVX512DQ-BW-NEXT: vpermt2d %zmm8, %zmm10, %zmm13
17045 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
17046 ; AVX512DQ-BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
17047 ; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm8, %zmm13
17048 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17049 ; AVX512DQ-BW-NEXT: vpermt2d %zmm4, %zmm10, %zmm14
17050 ; AVX512DQ-BW-NEXT: vpermt2d %zmm20, %zmm8, %zmm14
17051 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17052 ; AVX512DQ-BW-NEXT: vpermt2d %zmm17, %zmm10, %zmm11
17053 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm8, %zmm11
17054 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17055 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm10, %zmm12
17056 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm8, %zmm12
17057 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17058 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, %zmm2
17059 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm10, %zmm2
17060 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17061 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm2
17062 ; AVX512DQ-BW-NEXT: vpermt2d %zmm16, %zmm10, %zmm2
17063 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17064 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm2
17065 ; AVX512DQ-BW-NEXT: vpermt2d %zmm18, %zmm10, %zmm2
17066 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17067 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, %zmm2
17068 ; AVX512DQ-BW-NEXT: vpermt2d %zmm19, %zmm10, %zmm2
17069 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17070 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
17071 ; AVX512DQ-BW-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
17072 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, %zmm2
17073 ; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm25, %zmm2
17074 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17075 ; AVX512DQ-BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
17076 ; AVX512DQ-BW-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
17077 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm2
17078 ; AVX512DQ-BW-NEXT: vpermt2d %zmm16, %zmm27, %zmm2
17079 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17080 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
17081 ; AVX512DQ-BW-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
17082 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm2
17083 ; AVX512DQ-BW-NEXT: vpermt2d %zmm16, %zmm28, %zmm2
17084 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17085 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
17086 ; AVX512DQ-BW-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
17087 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm2
17088 ; AVX512DQ-BW-NEXT: vpermt2d %zmm16, %zmm31, %zmm2
17089 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17090 ; AVX512DQ-BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
17091 ; AVX512DQ-BW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
17092 ; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm2, %zmm16
17093 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17094 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm18, %zmm4
17095 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm25, %zmm4
17096 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17097 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm4
17098 ; AVX512DQ-BW-NEXT: vpermt2d %zmm18, %zmm27, %zmm4
17099 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17100 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm4
17101 ; AVX512DQ-BW-NEXT: vpermt2d %zmm18, %zmm28, %zmm4
17102 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17103 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm4
17104 ; AVX512DQ-BW-NEXT: vpermt2d %zmm18, %zmm31, %zmm4
17105 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17106 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm18
17107 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17108 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, %zmm0
17109 ; AVX512DQ-BW-NEXT: vpermt2d %zmm3, %zmm25, %zmm0
17110 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
17111 ; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm19, %zmm25
17112 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, %zmm0
17113 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm27, %zmm0
17114 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17115 ; AVX512DQ-BW-NEXT: vpermi2d %zmm19, %zmm1, %zmm27
17116 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, %zmm0
17117 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm28, %zmm0
17118 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17119 ; AVX512DQ-BW-NEXT: vpermi2d %zmm19, %zmm1, %zmm28
17120 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, %zmm0
17121 ; AVX512DQ-BW-NEXT: vpermt2d %zmm15, %zmm31, %zmm0
17122 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17123 ; AVX512DQ-BW-NEXT: vpermi2d %zmm19, %zmm1, %zmm31
17124 ; AVX512DQ-BW-NEXT: vpermt2d %zmm1, %zmm2, %zmm19
17125 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17126 ; AVX512DQ-BW-NEXT: vpermt2d %zmm3, %zmm2, %zmm15
17127 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17128 ; AVX512DQ-BW-NEXT: vmovdqa64 512(%rdi), %zmm0
17129 ; AVX512DQ-BW-NEXT: vmovdqa64 448(%rdi), %zmm17
17130 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,7,14,21,28,0,0,0]
17131 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, %zmm22
17132 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm22
17133 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,8,15,22,29,0,0,0]
17134 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, %zmm23
17135 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm3, %zmm23
17136 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [18,25,0,7,14,0,0,0]
17137 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm24
17138 ; AVX512DQ-BW-NEXT: vpermt2d %zmm17, %zmm4, %zmm24
17139 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm7 = [19,26,1,8,15,0,0,0]
17140 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm29
17141 ; AVX512DQ-BW-NEXT: vpermt2d %zmm17, %zmm7, %zmm29
17142 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm18 = [4,11,18,25]
17143 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, %zmm1
17144 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
17145 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17146 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm19 = [5,12,19,26]
17147 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, %zmm1
17148 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
17149 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17150 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} xmm20 = [6,13,20,27]
17151 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm20, %zmm17
17152 ; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm5
17153 ; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm0
17154 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm13
17155 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
17156 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm14
17157 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm3, %zmm14
17158 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm15
17159 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm4, %zmm15
17160 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm16
17161 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm7, %zmm16
17162 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm1
17163 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
17164 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17165 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm1
17166 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
17167 ; AVX512DQ-BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
17168 ; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm20, %zmm5
17169 ; AVX512DQ-BW-NEXT: vmovdqa64 960(%rdi), %zmm9
17170 ; AVX512DQ-BW-NEXT: vmovdqa64 896(%rdi), %zmm6
17171 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, %zmm8
17172 ; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm2, %zmm8
17173 ; AVX512DQ-BW-NEXT: vmovdqa64 1408(%rdi), %zmm0
17174 ; AVX512DQ-BW-NEXT: vmovdqa64 1344(%rdi), %zmm1
; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, %zmm10
; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm3, %zmm10
; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm11
; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm4, %zmm11
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm12
; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm7, %zmm12
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, %zmm21
; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm18, %zmm21
; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm18
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, %zmm26
; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm19, %zmm26
; AVX512DQ-BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
; AVX512DQ-BW-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512DQ-BW-NEXT: movw $992, %ax # imm = 0x3E0
; AVX512DQ-BW-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
; AVX512DQ-BW-NEXT: movb $-32, %al
; AVX512DQ-BW-NEXT: kmovd %eax, %k2
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm13 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm25, %zmm2 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
; AVX512DQ-BW-NEXT: movw $480, %ax # imm = 0x1E0
; AVX512DQ-BW-NEXT: kmovd %eax, %k2
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm10 {%k2}
; AVX512DQ-BW-NEXT: movw $-512, %ax # imm = 0xFE00
; AVX512DQ-BW-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm10 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm23 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm14 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm11 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm24 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm24 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm15 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm4 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm12 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm29 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm16 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm21, %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm9, %zmm0 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm20, %zmm9 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm25, %zmm20 {%k1}
; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm18, %zmm28, %zmm18
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm25, %zmm18 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm26, %zmm25, %zmm25
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm27, %zmm26 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm28, %zmm27 {%k1}
; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm19, %zmm31, %zmm19
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm30, %zmm19 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm17, %zmm28, %zmm17
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm28, %zmm17 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm5, %zmm28, %zmm5
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm28, %zmm5 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm1, %zmm28, %zmm1
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm28, %zmm1 {%k1}
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT: vinserti32x4 $0, %xmm6, %zmm28, %zmm6
; AVX512DQ-BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm28, %zmm6 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 192(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, 128(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, 64(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, (%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, 192(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm23, 64(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, 128(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, 192(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, (%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm24, 64(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm11, 128(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, 192(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, (%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm29, 64(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, 128(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm18, 192(%r9)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, (%r9)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 64(%r9)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 128(%r9)
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm19, 192(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm27, (%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm26, 64(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm25, 128(%rax)
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, 128(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, 192(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, 64(%rax)
; AVX512DQ-BW-NEXT: addq $3400, %rsp # imm = 0xD48
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i32_stride7_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: subq $3400, %rsp # imm = 0xD48
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1728(%rdi), %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1664(%rdi), %zmm17
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1600(%rdi), %zmm11
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1280(%rdi), %zmm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1216(%rdi), %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1152(%rdi), %zmm12
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 832(%rdi), %zmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 768(%rdi), %zmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 704(%rdi), %zmm13
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm20
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm14
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
; AVX512DQ-BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
; AVX512DQ-BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
; AVX512DQ-BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
; AVX512DQ-BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
; AVX512DQ-BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
; AVX512DQ-BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm8, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm4, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
; AVX512DQ-BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
; AVX512DQ-BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
; AVX512DQ-BW-FCP-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
; AVX512DQ-BW-FCP-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm13, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm14, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm20, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm11, %zmm17, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1024(%rdi), %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1088(%rdi), %zmm15
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
; AVX512DQ-BW-FCP-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm30, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 576(%rdi), %zmm9
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 640(%rdi), %zmm16
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm16, %zmm30, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm18
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm18, %zmm30, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1472(%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1536(%rdi), %zmm19
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm19, %zmm30, %zmm10
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm21
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm30, %zmm21
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
; AVX512DQ-BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm10, %zmm21
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm21
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm8, %zmm30, %zmm21
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm10, %zmm21
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm21
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm4, %zmm30, %zmm21
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm20, %zmm10, %zmm21
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm17, %zmm11, %zmm30
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm10, %zmm30
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
; AVX512DQ-BW-FCP-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm8, %zmm10, %zmm13
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
; AVX512DQ-BW-FCP-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm8, %zmm13
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm4, %zmm10, %zmm14
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm20, %zmm8, %zmm14
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm17, %zmm10, %zmm11
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm8, %zmm11
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm10, %zmm12
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm8, %zmm12
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm10, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm16, %zmm10, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm18, %zmm10, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm19, %zmm10, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
; AVX512DQ-BW-FCP-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm16, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm25, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
; AVX512DQ-BW-FCP-NEXT: # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm16, %zmm27, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
; AVX512DQ-BW-FCP-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm16, %zmm28, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
; AVX512DQ-BW-FCP-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm16, %zmm31, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
; AVX512DQ-BW-FCP-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm2, %zmm16
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm18, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm25, %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm18, %zmm27, %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm18, %zmm28, %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm18, %zmm31, %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm18
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm15, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm3, %zmm25, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm19, %zmm25
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm27, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm27
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm28, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm28
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm15, %zmm31, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm19, %zmm1, %zmm31
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm1, %zmm2, %zmm19
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm3, %zmm2, %zmm15
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 512(%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 448(%rdi), %zmm17
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,7,14,21,28,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm22
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm22
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,8,15,22,29,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm23
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm3, %zmm23
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [18,25,0,7,14,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm24
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm17, %zmm4, %zmm24
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [19,26,1,8,15,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm29
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm17, %zmm7, %zmm29
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm18 = [4,11,18,25]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm19 = [5,12,19,26]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm20 = [6,13,20,27]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm17
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm13
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm14
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm3, %zmm14
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm4, %zmm15
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm16
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm7, %zmm16
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm18, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm19, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 960(%rdi), %zmm9
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 896(%rdi), %zmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm8
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm2, %zmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1408(%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1344(%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm3, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm11
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm4, %zmm11
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm12
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm7, %zmm12
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm21
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm18, %zmm21
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm18
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm26
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm19, %zmm26
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm0, %zmm1, %zmm19
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512DQ-BW-FCP-NEXT: movw $992, %ax # imm = 0x3E0
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
; AVX512DQ-BW-FCP-NEXT: movb $-32, %al
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm13 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm2 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
; AVX512DQ-BW-FCP-NEXT: movw $480, %ax # imm = 0x1E0
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k2
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm10 {%k2}
; AVX512DQ-BW-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm10 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm23 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm14 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm11 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm24 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm24 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm15 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm4 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm12 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm29 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm16 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm21, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm9, %zmm0 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm20, %zmm9 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm20 {%k1}
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm18, %zmm28, %zmm18
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm25, %zmm18 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm26, %zmm25, %zmm25
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm26, %zmm25 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm27, %zmm26 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm27 {%k1}
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm19, %zmm31, %zmm19
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm30, %zmm19 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm17, %zmm28, %zmm17
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm17 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm5, %zmm28, %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm5 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm1, %zmm28, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm1 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $0, %xmm6, %zmm28, %zmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm28, %zmm6 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 192(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, 128(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, 64(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 192(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm23, 64(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, 128(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, 192(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm15, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm24, 64(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, 128(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, 192(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm16, (%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm29, 64(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, 128(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm18, 192(%r9)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm20, (%r9)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%r9)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 128(%r9)
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm19, 192(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm27, (%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm26, 64(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm25, 128(%rax)
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, 128(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 192(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, 64(%rax)
; AVX512DQ-BW-FCP-NEXT: addq $3400, %rsp # imm = 0xD48
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <448 x i32>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <448 x i32> %wide.vec, <448 x i32> poison, <64 x i32> <i32 0, i32 7, i32 14, i32 21, i32 28, i32 35, i32 42, i32 49, i32 56, i32 63, i32 70, i32 77, i32 84, i32 91, i32 98, i32 105, i32 112, i32 119, i32 126, i32 133, i32 140, i32 147, i32 154, i32 161, i32 168, i32 175, i32 182, i32 189, i32 196, i32 203, i32 210, i32 217, i32 224, i32 231, i32 238, i32 245, i32 252, i32 259, i32 266, i32 273, i32 280, i32 287, i32 294, i32 301, i32 308, i32 315, i32 322, i32 329, i32 336, i32 343, i32 350, i32 357, i32 364, i32 371, i32 378, i32 385, i32 392, i32 399, i32 406, i32 413, i32 420, i32 427, i32 434, i32 441>
%strided.vec1 = shufflevector <448 x i32> %wide.vec, <448 x i32> poison, <64 x i32> <i32 1, i32 8, i32 15, i32 22, i32 29, i32 36, i32 43, i32 50, i32 57, i32 64, i32 71, i32 78, i32 85, i32 92, i32 99, i32 106, i32 113, i32 120, i32 127, i32 134, i32 141, i32 148, i32 155, i32 162, i32 169, i32 176, i32 183, i32 190, i32 197, i32 204, i32 211, i32 218, i32 225, i32 232, i32 239, i32 246, i32 253, i32 260, i32 267, i32 274, i32 281, i32 288, i32 295, i32 302, i32 309, i32 316, i32 323, i32 330, i32 337, i32 344, i32 351, i32 358, i32 365, i32 372, i32 379, i32 386, i32 393, i32 400, i32 407, i32 414, i32 421, i32 428, i32 435, i32 442>
%strided.vec2 = shufflevector <448 x i32> %wide.vec, <448 x i32> poison, <64 x i32> <i32 2, i32 9, i32 16, i32 23, i32 30, i32 37, i32 44, i32 51, i32 58, i32 65, i32 72, i32 79, i32 86, i32 93, i32 100, i32 107, i32 114, i32 121, i32 128, i32 135, i32 142, i32 149, i32 156, i32 163, i32 170, i32 177, i32 184, i32 191, i32 198, i32 205, i32 212, i32 219, i32 226, i32 233, i32 240, i32 247, i32 254, i32 261, i32 268, i32 275, i32 282, i32 289, i32 296, i32 303, i32 310, i32 317, i32 324, i32 331, i32 338, i32 345, i32 352, i32 359, i32 366, i32 373, i32 380, i32 387, i32 394, i32 401, i32 408, i32 415, i32 422, i32 429, i32 436, i32 443>
%strided.vec3 = shufflevector <448 x i32> %wide.vec, <448 x i32> poison, <64 x i32> <i32 3, i32 10, i32 17, i32 24, i32 31, i32 38, i32 45, i32 52, i32 59, i32 66, i32 73, i32 80, i32 87, i32 94, i32 101, i32 108, i32 115, i32 122, i32 129, i32 136, i32 143, i32 150, i32 157, i32 164, i32 171, i32 178, i32 185, i32 192, i32 199, i32 206, i32 213, i32 220, i32 227, i32 234, i32 241, i32 248, i32 255, i32 262, i32 269, i32 276, i32 283, i32 290, i32 297, i32 304, i32 311, i32 318, i32 325, i32 332, i32 339, i32 346, i32 353, i32 360, i32 367, i32 374, i32 381, i32 388, i32 395, i32 402, i32 409, i32 416, i32 423, i32 430, i32 437, i32 444>
%strided.vec4 = shufflevector <448 x i32> %wide.vec, <448 x i32> poison, <64 x i32> <i32 4, i32 11, i32 18, i32 25, i32 32, i32 39, i32 46, i32 53, i32 60, i32 67, i32 74, i32 81, i32 88, i32 95, i32 102, i32 109, i32 116, i32 123, i32 130, i32 137, i32 144, i32 151, i32 158, i32 165, i32 172, i32 179, i32 186, i32 193, i32 200, i32 207, i32 214, i32 221, i32 228, i32 235, i32 242, i32 249, i32 256, i32 263, i32 270, i32 277, i32 284, i32 291, i32 298, i32 305, i32 312, i32 319, i32 326, i32 333, i32 340, i32 347, i32 354, i32 361, i32 368, i32 375, i32 382, i32 389, i32 396, i32 403, i32 410, i32 417, i32 424, i32 431, i32 438, i32 445>
%strided.vec5 = shufflevector <448 x i32> %wide.vec, <448 x i32> poison, <64 x i32> <i32 5, i32 12, i32 19, i32 26, i32 33, i32 40, i32 47, i32 54, i32 61, i32 68, i32 75, i32 82, i32 89, i32 96, i32 103, i32 110, i32 117, i32 124, i32 131, i32 138, i32 145, i32 152, i32 159, i32 166, i32 173, i32 180, i32 187, i32 194, i32 201, i32 208, i32 215, i32 222, i32 229, i32 236, i32 243, i32 250, i32 257, i32 264, i32 271, i32 278, i32 285, i32 292, i32 299, i32 306, i32 313, i32 320, i32 327, i32 334, i32 341, i32 348, i32 355, i32 362, i32 369, i32 376, i32 383, i32 390, i32 397, i32 404, i32 411, i32 418, i32 425, i32 432, i32 439, i32 446>
%strided.vec6 = shufflevector <448 x i32> %wide.vec, <448 x i32> poison, <64 x i32> <i32 6, i32 13, i32 20, i32 27, i32 34, i32 41, i32 48, i32 55, i32 62, i32 69, i32 76, i32 83, i32 90, i32 97, i32 104, i32 111, i32 118, i32 125, i32 132, i32 139, i32 146, i32 153, i32 160, i32 167, i32 174, i32 181, i32 188, i32 195, i32 202, i32 209, i32 216, i32 223, i32 230, i32 237, i32 244, i32 251, i32 258, i32 265, i32 272, i32 279, i32 286, i32 293, i32 300, i32 307, i32 314, i32 321, i32 328, i32 335, i32 342, i32 349, i32 356, i32 363, i32 370, i32 377, i32 384, i32 391, i32 398, i32 405, i32 412, i32 419, i32 426, i32 433, i32 440, i32 447>
store <64 x i32> %strided.vec0, ptr %out.vec0, align 64
store <64 x i32> %strided.vec1, ptr %out.vec1, align 64
store <64 x i32> %strided.vec2, ptr %out.vec2, align 64
store <64 x i32> %strided.vec3, ptr %out.vec3, align 64
store <64 x i32> %strided.vec4, ptr %out.vec4, align 64
store <64 x i32> %strided.vec5, ptr %out.vec5, align 64
store <64 x i32> %strided.vec6, ptr %out.vec6, align 64