1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by LoopVectorizer for interleaved loads.
18 define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
19 ; SSE-LABEL: load_i32_stride6_vf2:
21 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
22 ; SSE-NEXT: movdqa (%rdi), %xmm1
23 ; SSE-NEXT: movdqa 16(%rdi), %xmm0
24 ; SSE-NEXT: movdqa 32(%rdi), %xmm2
25 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
26 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,1,1]
27 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
28 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[3,3,3,3]
29 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
30 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
31 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
32 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
33 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
34 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
35 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
36 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
37 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
38 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
39 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
40 ; SSE-NEXT: movq %xmm1, (%rsi)
41 ; SSE-NEXT: movq %xmm4, (%rdx)
42 ; SSE-NEXT: movq %xmm5, (%rcx)
43 ; SSE-NEXT: movq %xmm6, (%r8)
44 ; SSE-NEXT: movq %xmm0, (%r9)
45 ; SSE-NEXT: movq %xmm7, (%rax)
48 ; AVX1-ONLY-LABEL: load_i32_stride6_vf2:
50 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
51 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm0
52 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
53 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm2
54 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm1[2,2,3,3]
55 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3]
56 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm0[0,1],xmm1[2,3]
57 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,3,2,3]
58 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm2[0,1],xmm0[2,3]
59 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm5[2,0,2,3]
60 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
61 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
62 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm2[2,2,3,3]
63 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm1[0],xmm6[1],xmm1[2,3]
64 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
65 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3,2,3]
66 ; AVX1-ONLY-NEXT: vmovlps %xmm3, (%rsi)
67 ; AVX1-ONLY-NEXT: vmovlps %xmm4, (%rdx)
68 ; AVX1-ONLY-NEXT: vmovlps %xmm5, (%rcx)
69 ; AVX1-ONLY-NEXT: vmovlps %xmm0, (%r8)
70 ; AVX1-ONLY-NEXT: vmovlps %xmm6, (%r9)
71 ; AVX1-ONLY-NEXT: vmovlps %xmm1, (%rax)
72 ; AVX1-ONLY-NEXT: retq
74 ; AVX2-ONLY-LABEL: load_i32_stride6_vf2:
76 ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
77 ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm0
78 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm1
79 ; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm2
80 ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm3
81 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm2[2,2,3,3]
82 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm1[0],xmm4[1],xmm1[2,3]
83 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3]
84 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,3,2,3]
85 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm1[2,3]
86 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm5[2,0,2,3]
87 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
88 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
89 ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [4,2,4,2]
90 ; AVX2-ONLY-NEXT: # xmm3 = mem[0,0]
91 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
92 ; AVX2-ONLY-NEXT: vpermps %ymm0, %ymm3, %ymm3
93 ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm6 = [5,3,5,3]
94 ; AVX2-ONLY-NEXT: # xmm6 = mem[0,0]
95 ; AVX2-ONLY-NEXT: vpermps %ymm0, %ymm6, %ymm0
96 ; AVX2-ONLY-NEXT: vmovlps %xmm4, (%rsi)
97 ; AVX2-ONLY-NEXT: vmovlps %xmm2, (%rdx)
98 ; AVX2-ONLY-NEXT: vmovlps %xmm5, (%rcx)
99 ; AVX2-ONLY-NEXT: vmovlps %xmm1, (%r8)
100 ; AVX2-ONLY-NEXT: vmovlps %xmm3, (%r9)
101 ; AVX2-ONLY-NEXT: vmovlps %xmm0, (%rax)
102 ; AVX2-ONLY-NEXT: vzeroupper
103 ; AVX2-ONLY-NEXT: retq
105 ; AVX512-SLOW-LABEL: load_i32_stride6_vf2:
106 ; AVX512-SLOW: # %bb.0:
107 ; AVX512-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
108 ; AVX512-SLOW-NEXT: vmovdqa (%rdi), %xmm0
109 ; AVX512-SLOW-NEXT: vmovaps 16(%rdi), %xmm1
110 ; AVX512-SLOW-NEXT: vmovdqa 32(%rdi), %xmm2
111 ; AVX512-SLOW-NEXT: vextractps $2, %xmm1, %r10d
112 ; AVX512-SLOW-NEXT: vpinsrd $1, %r10d, %xmm0, %xmm3
113 ; AVX512-SLOW-NEXT: vextractps $3, %xmm1, %r10d
114 ; AVX512-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
115 ; AVX512-SLOW-NEXT: vpinsrd $1, %r10d, %xmm1, %xmm1
116 ; AVX512-SLOW-NEXT: vpbroadcastd 8(%rdi), %xmm4
117 ; AVX512-SLOW-NEXT: vmovd %xmm2, %r10d
118 ; AVX512-SLOW-NEXT: vpinsrd $1, %r10d, %xmm4, %xmm4
119 ; AVX512-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
120 ; AVX512-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
121 ; AVX512-SLOW-NEXT: vmovddup {{.*#+}} xmm2 = [4,2,4,2]
122 ; AVX512-SLOW-NEXT: # xmm2 = mem[0,0]
123 ; AVX512-SLOW-NEXT: vmovaps 32(%rdi), %ymm5
124 ; AVX512-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],mem[4,5,6,7]
125 ; AVX512-SLOW-NEXT: vpermps %ymm5, %ymm2, %ymm2
126 ; AVX512-SLOW-NEXT: vmovddup {{.*#+}} xmm6 = [5,3,5,3]
127 ; AVX512-SLOW-NEXT: # xmm6 = mem[0,0]
128 ; AVX512-SLOW-NEXT: vpermps %ymm5, %ymm6, %ymm5
129 ; AVX512-SLOW-NEXT: vmovq %xmm3, (%rsi)
130 ; AVX512-SLOW-NEXT: vmovq %xmm1, (%rdx)
131 ; AVX512-SLOW-NEXT: vmovq %xmm4, (%rcx)
132 ; AVX512-SLOW-NEXT: vmovq %xmm0, (%r8)
133 ; AVX512-SLOW-NEXT: vmovlps %xmm2, (%r9)
134 ; AVX512-SLOW-NEXT: vmovlps %xmm5, (%rax)
135 ; AVX512-SLOW-NEXT: vzeroupper
136 ; AVX512-SLOW-NEXT: retq
138 ; AVX512-FAST-LABEL: load_i32_stride6_vf2:
139 ; AVX512-FAST: # %bb.0:
140 ; AVX512-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
141 ; AVX512-FAST-NEXT: vpbroadcastq {{.*#+}} xmm0 = [0,6,0,6]
142 ; AVX512-FAST-NEXT: vmovdqa (%rdi), %xmm1
143 ; AVX512-FAST-NEXT: vmovdqa 16(%rdi), %xmm2
144 ; AVX512-FAST-NEXT: vmovdqa 32(%rdi), %xmm3
145 ; AVX512-FAST-NEXT: vpermi2d %xmm2, %xmm1, %xmm0
146 ; AVX512-FAST-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,7,1,7]
147 ; AVX512-FAST-NEXT: vpermi2d %xmm2, %xmm1, %xmm4
148 ; AVX512-FAST-NEXT: vpbroadcastq {{.*#+}} xmm2 = [2,4,2,4]
149 ; AVX512-FAST-NEXT: vpermi2d %xmm3, %xmm1, %xmm2
150 ; AVX512-FAST-NEXT: vpbroadcastq {{.*#+}} xmm5 = [3,5,3,5]
151 ; AVX512-FAST-NEXT: vpermi2d %xmm3, %xmm1, %xmm5
152 ; AVX512-FAST-NEXT: vmovddup {{.*#+}} xmm1 = [4,2,4,2]
153 ; AVX512-FAST-NEXT: # xmm1 = mem[0,0]
154 ; AVX512-FAST-NEXT: vmovaps 32(%rdi), %ymm3
155 ; AVX512-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
156 ; AVX512-FAST-NEXT: vpermps %ymm3, %ymm1, %ymm1
157 ; AVX512-FAST-NEXT: vmovddup {{.*#+}} xmm6 = [5,3,5,3]
158 ; AVX512-FAST-NEXT: # xmm6 = mem[0,0]
159 ; AVX512-FAST-NEXT: vpermps %ymm3, %ymm6, %ymm3
160 ; AVX512-FAST-NEXT: vmovq %xmm0, (%rsi)
161 ; AVX512-FAST-NEXT: vmovq %xmm4, (%rdx)
162 ; AVX512-FAST-NEXT: vmovq %xmm2, (%rcx)
163 ; AVX512-FAST-NEXT: vmovq %xmm5, (%r8)
164 ; AVX512-FAST-NEXT: vmovlps %xmm1, (%r9)
165 ; AVX512-FAST-NEXT: vmovlps %xmm3, (%rax)
166 ; AVX512-FAST-NEXT: vzeroupper
167 ; AVX512-FAST-NEXT: retq
169 ; AVX512BW-SLOW-LABEL: load_i32_stride6_vf2:
170 ; AVX512BW-SLOW: # %bb.0:
171 ; AVX512BW-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
172 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %xmm0
173 ; AVX512BW-SLOW-NEXT: vmovaps 16(%rdi), %xmm1
174 ; AVX512BW-SLOW-NEXT: vmovdqa 32(%rdi), %xmm2
175 ; AVX512BW-SLOW-NEXT: vextractps $2, %xmm1, %r10d
176 ; AVX512BW-SLOW-NEXT: vpinsrd $1, %r10d, %xmm0, %xmm3
177 ; AVX512BW-SLOW-NEXT: vextractps $3, %xmm1, %r10d
178 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
179 ; AVX512BW-SLOW-NEXT: vpinsrd $1, %r10d, %xmm1, %xmm1
180 ; AVX512BW-SLOW-NEXT: vpbroadcastd 8(%rdi), %xmm4
181 ; AVX512BW-SLOW-NEXT: vmovd %xmm2, %r10d
182 ; AVX512BW-SLOW-NEXT: vpinsrd $1, %r10d, %xmm4, %xmm4
183 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
184 ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
185 ; AVX512BW-SLOW-NEXT: vmovddup {{.*#+}} xmm2 = [4,2,4,2]
186 ; AVX512BW-SLOW-NEXT: # xmm2 = mem[0,0]
187 ; AVX512BW-SLOW-NEXT: vmovaps 32(%rdi), %ymm5
188 ; AVX512BW-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],mem[4,5,6,7]
189 ; AVX512BW-SLOW-NEXT: vpermps %ymm5, %ymm2, %ymm2
190 ; AVX512BW-SLOW-NEXT: vmovddup {{.*#+}} xmm6 = [5,3,5,3]
191 ; AVX512BW-SLOW-NEXT: # xmm6 = mem[0,0]
192 ; AVX512BW-SLOW-NEXT: vpermps %ymm5, %ymm6, %ymm5
193 ; AVX512BW-SLOW-NEXT: vmovq %xmm3, (%rsi)
194 ; AVX512BW-SLOW-NEXT: vmovq %xmm1, (%rdx)
195 ; AVX512BW-SLOW-NEXT: vmovq %xmm4, (%rcx)
196 ; AVX512BW-SLOW-NEXT: vmovq %xmm0, (%r8)
197 ; AVX512BW-SLOW-NEXT: vmovlps %xmm2, (%r9)
198 ; AVX512BW-SLOW-NEXT: vmovlps %xmm5, (%rax)
199 ; AVX512BW-SLOW-NEXT: vzeroupper
200 ; AVX512BW-SLOW-NEXT: retq
201 %wide.vec = load <12 x i32>, ptr %in.vec, align 64
202 %strided.vec0 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <2 x i32> <i32 0, i32 6>
203 %strided.vec1 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <2 x i32> <i32 1, i32 7>
204 %strided.vec2 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <2 x i32> <i32 2, i32 8>
205 %strided.vec3 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <2 x i32> <i32 3, i32 9>
206 %strided.vec4 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <2 x i32> <i32 4, i32 10>
207 %strided.vec5 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <2 x i32> <i32 5, i32 11>
208 store <2 x i32> %strided.vec0, ptr %out.vec0, align 64
209 store <2 x i32> %strided.vec1, ptr %out.vec1, align 64
210 store <2 x i32> %strided.vec2, ptr %out.vec2, align 64
211 store <2 x i32> %strided.vec3, ptr %out.vec3, align 64
212 store <2 x i32> %strided.vec4, ptr %out.vec4, align 64
213 store <2 x i32> %strided.vec5, ptr %out.vec5, align 64
217 define void @load_i32_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
218 ; SSE-LABEL: load_i32_stride6_vf4:
220 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
221 ; SSE-NEXT: movdqa 80(%rdi), %xmm1
222 ; SSE-NEXT: movdqa 64(%rdi), %xmm0
223 ; SSE-NEXT: movdqa (%rdi), %xmm4
224 ; SSE-NEXT: movdqa 16(%rdi), %xmm2
225 ; SSE-NEXT: movdqa 48(%rdi), %xmm3
226 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
227 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,1,1]
228 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[2,3,2,3]
229 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm4[3,3,3,3]
230 ; SSE-NEXT: movdqa %xmm4, %xmm6
231 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
232 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,2,3,3]
233 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,1,1]
234 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
235 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm6[0],xmm4[1]
236 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[3,3,3,3]
237 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
238 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm0[2,3,2,3]
239 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,2,3,3]
240 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
241 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
242 ; SSE-NEXT: movdqa 32(%rdi), %xmm10
243 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm7[0],xmm3[1]
244 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1]
245 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,0,1,1]
246 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
247 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm8[0],xmm5[1]
248 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm10[1,1,1,1]
249 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
250 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
251 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm9[0],xmm6[1]
252 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm10[2,3,2,3]
253 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm2[1,1,1,1]
254 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
255 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,2,3,3]
256 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,0,1,1]
257 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
258 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm2[0],xmm9[1]
259 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[3,3,3,3]
260 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1]
261 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
262 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
263 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm8[0],xmm0[1]
264 ; SSE-NEXT: movapd %xmm4, (%rsi)
265 ; SSE-NEXT: movapd %xmm3, (%rdx)
266 ; SSE-NEXT: movapd %xmm5, (%rcx)
267 ; SSE-NEXT: movapd %xmm6, (%r8)
268 ; SSE-NEXT: movapd %xmm9, (%r9)
269 ; SSE-NEXT: movapd %xmm0, (%rax)
272 ; AVX1-ONLY-LABEL: load_i32_stride6_vf4:
273 ; AVX1-ONLY: # %bb.0:
274 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
275 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm0
276 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm1
277 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
278 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm3
279 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm2[0,1],xmm3[2,3]
280 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,3]
281 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm5
282 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm5[2]
283 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[3,0]
284 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[1,3]
285 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm5[3]
286 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
287 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
288 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm0[2,0],xmm1[2,3]
289 ; AVX1-ONLY-NEXT: vmovaps 80(%rdi), %xmm6
290 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm6[0]
291 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[3,3]
292 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm6[1]
293 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1
294 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm1[2,2,3,3]
295 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm8
296 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3]
297 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm5[0,1],xmm6[2,3]
298 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm9[0,1,0,2]
299 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm9[2,3]
300 ; AVX1-ONLY-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero
301 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5],xmm6[6,7]
302 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1],xmm1[2,3]
303 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3,2,3]
304 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3]
305 ; AVX1-ONLY-NEXT: vmovaps %xmm4, (%rsi)
306 ; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rdx)
307 ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%rcx)
308 ; AVX1-ONLY-NEXT: vmovaps %xmm0, (%r8)
309 ; AVX1-ONLY-NEXT: vmovaps %xmm7, (%r9)
310 ; AVX1-ONLY-NEXT: vmovaps %xmm1, (%rax)
311 ; AVX1-ONLY-NEXT: vzeroupper
312 ; AVX1-ONLY-NEXT: retq
314 ; AVX2-ONLY-LABEL: load_i32_stride6_vf4:
315 ; AVX2-ONLY: # %bb.0:
316 ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
317 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm1
318 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %ymm2
319 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = <0,6,4,u>
320 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
321 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm0, %ymm0
322 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm4
323 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[2,2,2,2]
324 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[3]
325 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm5 = <1,7,5,u>
326 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm5, %ymm3
327 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3]
328 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm5
329 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
330 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm7 = ymm2[0,0,2,3,4,4,6,7]
331 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3],ymm6[4],ymm7[5,6,7]
332 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,3,2,3]
333 ; AVX2-ONLY-NEXT: vmovdqa 80(%rdi), %xmm7
334 ; AVX2-ONLY-NEXT: vpbroadcastd %xmm7, %xmm8
335 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm8[3]
336 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,3,3,3]
337 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm8 = ymm2[0,1,3,3,4,5,7,7]
338 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm8[1,2,3],ymm5[4],ymm8[5,6,7]
339 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,3,2,3]
340 ; AVX2-ONLY-NEXT: vpbroadcastd 84(%rdi), %xmm8
341 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3]
342 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm8 = xmm4[0,1],xmm7[2,3]
343 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[0,1,0,2]
344 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} xmm9 = [4,2,4,2]
345 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
346 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm9, %ymm2
347 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm8[2,3]
348 ; AVX2-ONLY-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
349 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[3]
350 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} xmm7 = [5,3,5,3]
351 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm7, %ymm1
352 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3]
353 ; AVX2-ONLY-NEXT: vmovdqa %xmm0, (%rsi)
354 ; AVX2-ONLY-NEXT: vmovdqa %xmm3, (%rdx)
355 ; AVX2-ONLY-NEXT: vmovdqa %xmm6, (%rcx)
356 ; AVX2-ONLY-NEXT: vmovdqa %xmm5, (%r8)
357 ; AVX2-ONLY-NEXT: vmovdqa %xmm2, (%r9)
358 ; AVX2-ONLY-NEXT: vmovdqa %xmm1, (%rax)
359 ; AVX2-ONLY-NEXT: vzeroupper
360 ; AVX2-ONLY-NEXT: retq
362 ; AVX512-LABEL: load_i32_stride6_vf4:
364 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
365 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
366 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
367 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [0,6,12,18]
368 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
369 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [1,7,13,19]
370 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
371 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,8,14,20]
372 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
373 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [3,9,15,21]
374 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
375 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [4,10,16,22]
376 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
377 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [5,11,17,23]
378 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
379 ; AVX512-NEXT: vmovdqa %xmm2, (%rsi)
380 ; AVX512-NEXT: vmovdqa %xmm3, (%rdx)
381 ; AVX512-NEXT: vmovdqa %xmm4, (%rcx)
382 ; AVX512-NEXT: vmovdqa %xmm5, (%r8)
383 ; AVX512-NEXT: vmovdqa %xmm6, (%r9)
384 ; AVX512-NEXT: vmovdqa %xmm7, (%rax)
385 ; AVX512-NEXT: vzeroupper
387 %wide.vec = load <24 x i32>, ptr %in.vec, align 64
388 %strided.vec0 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <4 x i32> <i32 0, i32 6, i32 12, i32 18>
389 %strided.vec1 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <4 x i32> <i32 1, i32 7, i32 13, i32 19>
390 %strided.vec2 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <4 x i32> <i32 2, i32 8, i32 14, i32 20>
391 %strided.vec3 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <4 x i32> <i32 3, i32 9, i32 15, i32 21>
392 %strided.vec4 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <4 x i32> <i32 4, i32 10, i32 16, i32 22>
393 %strided.vec5 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <4 x i32> <i32 5, i32 11, i32 17, i32 23>
394 store <4 x i32> %strided.vec0, ptr %out.vec0, align 64
395 store <4 x i32> %strided.vec1, ptr %out.vec1, align 64
396 store <4 x i32> %strided.vec2, ptr %out.vec2, align 64
397 store <4 x i32> %strided.vec3, ptr %out.vec3, align 64
398 store <4 x i32> %strided.vec4, ptr %out.vec4, align 64
399 store <4 x i32> %strided.vec5, ptr %out.vec5, align 64
403 define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
404 ; SSE-LABEL: load_i32_stride6_vf8:
406 ; SSE-NEXT: movdqa 144(%rdi), %xmm4
407 ; SSE-NEXT: movdqa 160(%rdi), %xmm2
408 ; SSE-NEXT: movdqa 96(%rdi), %xmm6
409 ; SSE-NEXT: movdqa 112(%rdi), %xmm3
410 ; SSE-NEXT: movdqa 64(%rdi), %xmm5
411 ; SSE-NEXT: movdqa (%rdi), %xmm10
412 ; SSE-NEXT: movdqa 16(%rdi), %xmm0
413 ; SSE-NEXT: movdqa 48(%rdi), %xmm8
414 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
415 ; SSE-NEXT: movdqa %xmm0, %xmm11
416 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
417 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
418 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm10[2,3,2,3]
419 ; SSE-NEXT: movdqa %xmm10, %xmm7
420 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
421 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,2,3,3]
422 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,1,1]
423 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
424 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm7[0],xmm9[1]
425 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
426 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
427 ; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm6[1,1,1,1]
428 ; SSE-NEXT: movdqa %xmm6, %xmm9
429 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
430 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
431 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,1,1]
432 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
433 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm9[0],xmm7[1]
434 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
435 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[3,3,3,3]
436 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
437 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
438 ; SSE-NEXT: movdqa %xmm5, %xmm9
439 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,2,3,3]
440 ; SSE-NEXT: movdqa %xmm8, %xmm11
441 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1]
442 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm0[0],xmm11[1]
443 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[3,3,3,3]
444 ; SSE-NEXT: movdqa %xmm3, %xmm1
445 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
446 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
447 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
448 ; SSE-NEXT: movdqa %xmm4, %xmm12
449 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
450 ; SSE-NEXT: movdqa 80(%rdi), %xmm14
451 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm15[0],xmm12[1]
452 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,0,1,1]
453 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
454 ; SSE-NEXT: movdqa 32(%rdi), %xmm7
455 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm7[0],xmm13[1],xmm7[1]
456 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm13[0],xmm5[1]
457 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
458 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm4[2,2,3,3]
459 ; SSE-NEXT: movdqa 176(%rdi), %xmm15
460 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,0,1,1]
461 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
462 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
463 ; SSE-NEXT: movdqa 128(%rdi), %xmm5
464 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
465 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
466 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[3,3,3,3]
467 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm7[1,1,1,1]
468 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
469 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
470 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1]
471 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm0[0],xmm8[1]
472 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,3,3,3]
473 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,1,1]
474 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
475 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
476 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1]
477 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
478 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
479 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
480 ; SSE-NEXT: movdqa %xmm3, %xmm10
481 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
482 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
483 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,0,1,1]
484 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
485 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm10[0],xmm6[1]
486 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm5[2,3,2,3]
487 ; SSE-NEXT: movdqa %xmm1, %xmm0
488 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
489 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,2,3,3]
490 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm2[0,0,1,1]
491 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1]
492 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
493 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
494 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[3,3,3,3]
495 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
496 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,3,2,3]
497 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
498 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
499 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
500 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
501 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3]
502 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
503 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,3,2,3]
504 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
505 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
506 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
507 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
508 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
509 ; SSE-NEXT: movaps %xmm0, (%rsi)
510 ; SSE-NEXT: movapd %xmm12, 16(%rdx)
511 ; SSE-NEXT: movapd %xmm11, (%rdx)
512 ; SSE-NEXT: movapd %xmm13, 16(%rcx)
513 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
514 ; SSE-NEXT: movaps %xmm0, (%rcx)
515 ; SSE-NEXT: movapd %xmm4, 16(%r8)
516 ; SSE-NEXT: movapd %xmm8, (%r8)
517 ; SSE-NEXT: movapd %xmm10, 16(%r9)
518 ; SSE-NEXT: movapd %xmm6, (%r9)
519 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
520 ; SSE-NEXT: movapd %xmm2, 16(%rax)
521 ; SSE-NEXT: movapd %xmm9, (%rax)
524 ; AVX1-ONLY-LABEL: load_i32_stride6_vf8:
525 ; AVX1-ONLY: # %bb.0:
526 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
527 ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %ymm3
528 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm4
529 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm6
530 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm7
531 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm0
532 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm1
533 ; AVX1-ONLY-NEXT: vinsertf128 $1, 96(%rdi), %ymm1, %ymm5
534 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm0[2,0],ymm5[0,0],ymm0[6,4],ymm5[4,4]
535 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm5[2,2],ymm2[6,4],ymm5[6,6]
536 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
537 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm8, %xmm9
538 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm8[0,1],xmm9[2,3]
539 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,2],xmm9[0,3]
540 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1,2],ymm2[3,4,5,6,7]
541 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm4[2,3],ymm3[0,1]
542 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm11 = ymm10[0],ymm4[0],ymm10[3],ymm4[2]
543 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
544 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm11[6,7]
545 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm0[3,0],ymm5[1,0],ymm0[7,4],ymm5[5,4]
546 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm11[2,0],ymm5[2,3],ymm11[6,4],ymm5[6,7]
547 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm8 = xmm8[1,0],xmm9[3,0]
548 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm8 = xmm8[0,2],xmm9[1,3]
549 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7]
550 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm10[3,1],ymm4[1,3],ymm10[7,5],ymm4[5,7]
551 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
552 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm8[6,7]
553 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm4[0,1],ymm3[2],ymm4[3]
554 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm8[2,3,0,1]
555 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm9[0,0],ymm8[2,0],ymm9[4,4],ymm8[6,4]
556 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
557 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm7, %xmm11
558 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm7[2,0],xmm11[2,3]
559 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm1[2,0],ymm0[6,5],ymm1[6,4]
560 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm12[2,3,0,1]
561 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm12[3,4,5,6,7]
562 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm10[5,6,7]
563 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm9[0,1],ymm8[3,1],ymm9[4,5],ymm8[7,5]
564 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm7[3,1],xmm11[3,3]
565 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm0[3,1],ymm1[2,1],ymm0[7,5],ymm1[6,5]
566 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,0,1]
567 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm9[3,4,5,6,7]
568 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5,6,7]
569 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3]
570 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm3[2,3,0,1]
571 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm3[2,0],ymm4[0,0],ymm3[6,4],ymm4[4,4]
572 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm4[0,2],ymm8[2,0],ymm4[4,6],ymm8[6,4]
573 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm9
574 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm10 = xmm9[2,2,3,3]
575 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm11
576 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3]
577 ; AVX1-ONLY-NEXT: vmovapd 80(%rdi), %xmm12
578 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm13 = ymm12[1],ymm1[0],ymm12[2],ymm1[2]
579 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm0[0,1],ymm13[2,0],ymm0[4,5],ymm13[6,4]
580 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3,4,5,6,7]
581 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3,4],ymm8[5,6,7]
582 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[3,0],ymm4[1,0],ymm3[7,4],ymm4[5,4]
583 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,3],ymm3[2,0],ymm4[4,7],ymm3[6,4]
584 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm11[0,1],xmm9[2,3]
585 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,3,2,3]
586 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm12[3,1],ymm1[1,3],ymm12[7,5],ymm1[5,7]
587 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[2,0],ymm0[5,5],ymm1[6,4]
588 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5,6,7]
589 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
590 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rsi)
591 ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rdx)
592 ; AVX1-ONLY-NEXT: vmovaps %ymm6, (%rcx)
593 ; AVX1-ONLY-NEXT: vmovaps %ymm7, (%r8)
594 ; AVX1-ONLY-NEXT: vmovaps %ymm8, (%r9)
595 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
596 ; AVX1-ONLY-NEXT: vzeroupper
597 ; AVX1-ONLY-NEXT: retq
599 ; AVX2-SLOW-LABEL: load_i32_stride6_vf8:
600 ; AVX2-SLOW: # %bb.0:
601 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
602 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm0
603 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm1
604 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm5
605 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm3
606 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm4
607 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm7
608 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm2 = <0,6,4,u>
609 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
610 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm2, %ymm2
611 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm7[0,1],ymm5[0,1]
612 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm5[6,7]
613 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm9 = ymm8[0,2,2,2,4,6,6,6]
614 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm9[3,4,5,6,7]
615 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
616 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm10 = [4,2,4,2,4,2,4,2]
617 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm10, %ymm10
618 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm10[6,7]
619 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm10 = <1,7,5,u>
620 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm10, %ymm6
621 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm8[1,3,2,3,5,7,6,7]
622 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3,4,5,6,7]
623 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm8 = [5,3,5,3,5,3,5,3]
624 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm8, %ymm8
625 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm8[6,7]
626 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
627 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0,2,3,6,4,6,7]
628 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,2,0,3]
629 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm9
630 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm10 = xmm9[2,3,2,3]
631 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm4[0,0,2,3,4,4,6,7]
632 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm11[1,2,3],ymm10[4],ymm11[5,6,7]
633 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,3,2,3]
634 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7]
635 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,0,0,0,4,4,4,4]
636 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,0,2,3,4,4,6,7]
637 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3],ymm11[4,5,6],ymm10[7]
638 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
639 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm10[5,6,7]
640 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm5[3,3,3,3,7,7,7,7]
641 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm7[1],ymm10[2,3,4],ymm7[5],ymm10[6,7]
642 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,0,3]
643 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm9 = xmm9[3,3,3,3]
644 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm4[0,1,3,3,4,5,7,7]
645 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm11[1,2,3],ymm9[4],ymm11[5,6,7]
646 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,3,2,3]
647 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4,5,6,7]
648 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,1,0,1,4,5,4,5]
649 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,1,3,3,4,5,7,7]
650 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3],ymm11[4,5,6],ymm10[7]
651 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
652 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5,6,7]
653 ; AVX2-SLOW-NEXT: vmovaps 80(%rdi), %xmm10
654 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
655 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
656 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,1,0,2,4,5,4,6]
657 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm11 = [4,2,4,2]
658 ; AVX2-SLOW-NEXT: # xmm11 = mem[0,0]
659 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
660 ; AVX2-SLOW-NEXT: vpermps %ymm3, %ymm11, %ymm4
661 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3,4,5,6,7]
662 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
663 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,0,6,0,2,0,6]
664 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1,0,1]
665 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm1
666 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
667 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm5[1,1,1,1,5,5,5,5]
668 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm10[3],ymm4[4,5,6,7]
669 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm5 = [5,3,5,3]
670 ; AVX2-SLOW-NEXT: # xmm5 = mem[0,0]
671 ; AVX2-SLOW-NEXT: vpermps %ymm3, %ymm5, %ymm3
672 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
673 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,3,1,7,0,3,1,7]
674 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,0,1]
675 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm4, %ymm0
676 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
677 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rsi)
678 ; AVX2-SLOW-NEXT: vmovaps %ymm6, (%rdx)
679 ; AVX2-SLOW-NEXT: vmovaps %ymm8, (%rcx)
680 ; AVX2-SLOW-NEXT: vmovaps %ymm9, (%r8)
681 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%r9)
682 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
683 ; AVX2-SLOW-NEXT: vzeroupper
684 ; AVX2-SLOW-NEXT: retq
686 ; AVX2-FAST-LABEL: load_i32_stride6_vf8:
687 ; AVX2-FAST: # %bb.0:
688 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
689 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm0
690 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm1
691 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm5
692 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm3
693 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm4
694 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm7
695 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm2 = <0,6,4,u>
696 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
697 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm2, %ymm2
698 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm7[0,1],ymm5[0,1]
699 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm5[6,7]
700 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm8[0,2,2,2,4,6,6,6]
701 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm9[3,4,5,6,7]
702 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
703 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm10 = [4,2,4,2,4,2,4,2]
704 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm10, %ymm10
705 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm10[6,7]
706 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm10 = <1,7,5,u>
707 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm10, %ymm6
708 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm8 = ymm8[1,3,2,3,5,7,6,7]
709 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3,4,5,6,7]
710 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm8 = [5,3,5,3,5,3,5,3]
711 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm8, %ymm8
712 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm8[6,7]
713 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
714 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm9 = [2,4,2,4,2,4,2,4]
715 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm9, %ymm8
716 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm9
717 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm10 = xmm9[2,3,2,3]
718 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm11 = ymm4[0,0,2,3,4,4,6,7]
719 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm11[1,2,3],ymm10[4],ymm11[5,6,7]
720 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,3,2,3]
721 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7]
722 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,0,0,0,4,4,4,4]
723 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,0,2,3,4,4,6,7]
724 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3],ymm11[4,5,6],ymm10[7]
725 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
726 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm10[5,6,7]
727 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm5[3,3,3,3,7,7,7,7]
728 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm7[1],ymm10[2,3,4],ymm7[5],ymm10[6,7]
729 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,0,3]
730 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm9 = xmm9[3,3,3,3]
731 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm11 = ymm4[0,1,3,3,4,5,7,7]
732 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm11[1,2,3],ymm9[4],ymm11[5,6,7]
733 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,3,2,3]
734 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4,5,6,7]
735 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,1,0,1,4,5,4,5]
736 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,1,3,3,4,5,7,7]
737 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3],ymm11[4,5,6],ymm10[7]
738 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
739 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5,6,7]
740 ; AVX2-FAST-NEXT: vmovaps 80(%rdi), %xmm10
741 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
742 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
743 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,1,0,2,4,5,4,6]
744 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm11 = [4,2,4,2]
745 ; AVX2-FAST-NEXT: # xmm11 = mem[0,0]
746 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
747 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm11, %ymm4
748 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3,4,5,6,7]
749 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
750 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,0,6,0,2,0,6]
751 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1,0,1]
752 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm1
753 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
754 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm5[1,1,1,1,5,5,5,5]
755 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm10[3],ymm4[4,5,6,7]
756 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm5 = [5,3,5,3]
757 ; AVX2-FAST-NEXT: # xmm5 = mem[0,0]
758 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm5, %ymm3
759 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
760 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,3,1,7,0,3,1,7]
761 ; AVX2-FAST-NEXT: # ymm4 = mem[0,1,0,1]
762 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm4, %ymm0
763 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
764 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rsi)
765 ; AVX2-FAST-NEXT: vmovaps %ymm6, (%rdx)
766 ; AVX2-FAST-NEXT: vmovaps %ymm8, (%rcx)
767 ; AVX2-FAST-NEXT: vmovaps %ymm9, (%r8)
768 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%r9)
769 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
770 ; AVX2-FAST-NEXT: vzeroupper
771 ; AVX2-FAST-NEXT: retq
773 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride6_vf8:
774 ; AVX2-FAST-PERLANE: # %bb.0:
775 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
776 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm0
777 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm1
778 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm5
779 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm3
780 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm4
781 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm7
782 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm2 = <0,6,4,u>
783 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
784 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm2, %ymm2
785 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm7[0,1],ymm5[0,1]
786 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm5[6,7]
787 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm9 = ymm8[0,2,2,2,4,6,6,6]
788 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm9[3,4,5,6,7]
789 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
790 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm10 = [4,2,4,2,4,2,4,2]
791 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm10, %ymm10
792 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm10[6,7]
793 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm10 = <1,7,5,u>
794 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm10, %ymm6
795 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm8[1,3,2,3,5,7,6,7]
796 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3,4,5,6,7]
797 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm8 = [5,3,5,3,5,3,5,3]
798 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm8, %ymm8
799 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm8[6,7]
800 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
801 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0,2,3,6,4,6,7]
802 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,2,0,3]
803 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm9
804 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm10 = xmm9[2,3,2,3]
805 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm4[0,0,2,3,4,4,6,7]
806 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm11[1,2,3],ymm10[4],ymm11[5,6,7]
807 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,3,2,3]
808 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7]
809 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,0,0,0,4,4,4,4]
810 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,0,2,3,4,4,6,7]
811 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3],ymm11[4,5,6],ymm10[7]
812 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
813 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm10[5,6,7]
814 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm5[3,3,3,3,7,7,7,7]
815 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm7[1],ymm10[2,3,4],ymm7[5],ymm10[6,7]
816 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,0,3]
817 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm9 = xmm9[3,3,3,3]
818 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm4[0,1,3,3,4,5,7,7]
819 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm11[1,2,3],ymm9[4],ymm11[5,6,7]
820 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,3,2,3]
821 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4,5,6,7]
822 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,1,0,1,4,5,4,5]
823 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,1,3,3,4,5,7,7]
824 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3],ymm11[4,5,6],ymm10[7]
825 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
826 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5,6,7]
827 ; AVX2-FAST-PERLANE-NEXT: vmovaps 80(%rdi), %xmm10
828 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
829 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
830 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,1,0,2,4,5,4,6]
831 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm11 = [4,2,4,2]
832 ; AVX2-FAST-PERLANE-NEXT: # xmm11 = mem[0,0]
833 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
834 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm3, %ymm11, %ymm4
835 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3,4,5,6,7]
836 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
837 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,0,6,0,2,0,6]
838 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1,0,1]
839 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm1, %ymm1
840 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
841 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm5[1,1,1,1,5,5,5,5]
842 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm10[3],ymm4[4,5,6,7]
843 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm5 = [5,3,5,3]
844 ; AVX2-FAST-PERLANE-NEXT: # xmm5 = mem[0,0]
845 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm3, %ymm5, %ymm3
846 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
847 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,3,1,7,0,3,1,7]
848 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,1,0,1]
849 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm4, %ymm0
850 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
851 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rsi)
852 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, (%rdx)
853 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, (%rcx)
854 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, (%r8)
855 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%r9)
856 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
857 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
858 ; AVX2-FAST-PERLANE-NEXT: retq
860 ; AVX512-LABEL: load_i32_stride6_vf8:
862 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
863 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm2
864 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3
865 ; AVX512-NEXT: vmovdqa 128(%rdi), %ymm0
866 ; AVX512-NEXT: vmovdqa 160(%rdi), %ymm1
867 ; AVX512-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7]
868 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = <0,6,12,18,24,30,u,u>
869 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
870 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,2,3,4,5,12,10]
871 ; AVX512-NEXT: vpermi2d %ymm4, %ymm5, %ymm6
872 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = <1,7,13,19,25,31,u,u>
873 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
874 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,2,3,4,5,13,11]
875 ; AVX512-NEXT: vpermi2d %ymm4, %ymm5, %ymm7
876 ; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,0,6,12,0,0,6,12]
877 ; AVX512-NEXT: # ymm4 = mem[0,1,0,1]
878 ; AVX512-NEXT: vpermi2d %ymm1, %ymm0, %ymm4
879 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = <2,8,14,20,26,u,u,u>
880 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm5
881 ; AVX512-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
882 ; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,1,7,13,0,1,7,13]
883 ; AVX512-NEXT: # ymm5 = mem[0,1,0,1]
884 ; AVX512-NEXT: vpermi2d %ymm1, %ymm0, %ymm5
885 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = <3,9,15,21,27,u,u,u>
886 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
887 ; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7]
888 ; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
889 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = <20,26,0,6,12,u,u,u>
890 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
891 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,2,3,4,10,8,14]
892 ; AVX512-NEXT: vpermi2d %ymm0, %ymm1, %ymm8
893 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = <21,27,1,7,13,u,u,u>
894 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
895 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,11,9,15]
896 ; AVX512-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
897 ; AVX512-NEXT: vmovdqa %ymm6, (%rsi)
898 ; AVX512-NEXT: vmovdqa %ymm7, (%rdx)
899 ; AVX512-NEXT: vmovdqa %ymm4, (%rcx)
900 ; AVX512-NEXT: vmovdqa %ymm5, (%r8)
901 ; AVX512-NEXT: vmovdqa %ymm8, (%r9)
902 ; AVX512-NEXT: vmovdqa %ymm2, (%rax)
903 ; AVX512-NEXT: vzeroupper
905 %wide.vec = load <48 x i32>, ptr %in.vec, align 64
906 %strided.vec0 = shufflevector <48 x i32> %wide.vec, <48 x i32> poison, <8 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42>
907 %strided.vec1 = shufflevector <48 x i32> %wide.vec, <48 x i32> poison, <8 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43>
908 %strided.vec2 = shufflevector <48 x i32> %wide.vec, <48 x i32> poison, <8 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44>
909 %strided.vec3 = shufflevector <48 x i32> %wide.vec, <48 x i32> poison, <8 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45>
910 %strided.vec4 = shufflevector <48 x i32> %wide.vec, <48 x i32> poison, <8 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46>
911 %strided.vec5 = shufflevector <48 x i32> %wide.vec, <48 x i32> poison, <8 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47>
912 store <8 x i32> %strided.vec0, ptr %out.vec0, align 64
913 store <8 x i32> %strided.vec1, ptr %out.vec1, align 64
914 store <8 x i32> %strided.vec2, ptr %out.vec2, align 64
915 store <8 x i32> %strided.vec3, ptr %out.vec3, align 64
916 store <8 x i32> %strided.vec4, ptr %out.vec4, align 64
917 store <8 x i32> %strided.vec5, ptr %out.vec5, align 64
921 define void @load_i32_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
922 ; SSE-LABEL: load_i32_stride6_vf16:
924 ; SSE-NEXT: subq $408, %rsp # imm = 0x198
925 ; SSE-NEXT: movdqa 240(%rdi), %xmm9
926 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
927 ; SSE-NEXT: movdqa 256(%rdi), %xmm3
928 ; SSE-NEXT: movdqa 192(%rdi), %xmm10
929 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
930 ; SSE-NEXT: movdqa 208(%rdi), %xmm4
931 ; SSE-NEXT: movdqa 336(%rdi), %xmm14
932 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
933 ; SSE-NEXT: movdqa 352(%rdi), %xmm5
934 ; SSE-NEXT: movdqa 288(%rdi), %xmm15
935 ; SSE-NEXT: movdqa 304(%rdi), %xmm7
936 ; SSE-NEXT: movdqa 64(%rdi), %xmm12
937 ; SSE-NEXT: movdqa (%rdi), %xmm8
938 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
939 ; SSE-NEXT: movdqa 48(%rdi), %xmm13
940 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
941 ; SSE-NEXT: movdqa %xmm1, %xmm11
942 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
943 ; SSE-NEXT: movdqa %xmm8, %xmm1
944 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
945 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
946 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,3,3]
947 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
948 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[0,0,1,1]
949 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
950 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
951 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
952 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
953 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
954 ; SSE-NEXT: movdqa %xmm15, %xmm1
955 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
956 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
957 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
958 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[0,0,1,1]
959 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
960 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
961 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
962 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
963 ; SSE-NEXT: movdqa %xmm4, %xmm14
964 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
965 ; SSE-NEXT: movdqa %xmm10, %xmm1
966 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
967 ; SSE-NEXT: movdqa %xmm3, %xmm2
968 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
969 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
970 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[0,0,1,1]
971 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
972 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
973 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
974 ; SSE-NEXT: movdqa 96(%rdi), %xmm4
975 ; SSE-NEXT: movdqa 112(%rdi), %xmm1
976 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
977 ; SSE-NEXT: movdqa %xmm1, %xmm10
978 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
979 ; SSE-NEXT: movdqa %xmm4, %xmm1
980 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
981 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
982 ; SSE-NEXT: movdqa 144(%rdi), %xmm9
983 ; SSE-NEXT: movdqa 160(%rdi), %xmm3
984 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
985 ; SSE-NEXT: movdqa %xmm3, %xmm6
986 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
987 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[0,0,1,1]
988 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
989 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
990 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
991 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
992 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[3,3,3,3]
993 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
994 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,3,2,3]
995 ; SSE-NEXT: movdqa %xmm13, %xmm3
996 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
997 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
998 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
999 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[1,1,1,1]
1000 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[3,3,3,3]
1001 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1002 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
1003 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1004 ; SSE-NEXT: movdqa %xmm5, %xmm3
1005 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1006 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
1007 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1008 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1009 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
1010 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[3,3,3,3]
1011 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1012 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
1013 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1014 ; SSE-NEXT: movdqa %xmm3, %xmm2
1015 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1016 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
1017 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1018 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
1019 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[3,3,3,3]
1020 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1021 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,3,2,3]
1022 ; SSE-NEXT: movdqa %xmm9, %xmm14
1023 ; SSE-NEXT: movdqa %xmm9, %xmm2
1024 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1025 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
1026 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1027 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,2,3,3]
1028 ; SSE-NEXT: movdqa 80(%rdi), %xmm2
1029 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,1,1]
1030 ; SSE-NEXT: movdqa %xmm2, %xmm10
1031 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1032 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
1033 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,3,2,3]
1034 ; SSE-NEXT: movdqa 32(%rdi), %xmm6
1035 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
1036 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1037 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1038 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,2,3,3]
1039 ; SSE-NEXT: movdqa 368(%rdi), %xmm0
1040 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1041 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
1042 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1043 ; SSE-NEXT: movdqa %xmm15, %xmm9
1044 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,3,2,3]
1045 ; SSE-NEXT: movdqa 320(%rdi), %xmm8
1046 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
1047 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1048 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1049 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,2,3,3]
1050 ; SSE-NEXT: movdqa 272(%rdi), %xmm15
1051 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,0,1,1]
1052 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1053 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
1054 ; SSE-NEXT: movdqa 224(%rdi), %xmm3
1055 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
1056 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1057 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1058 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,2,3,3]
1059 ; SSE-NEXT: movdqa 176(%rdi), %xmm11
1060 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,0,1,1]
1061 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1062 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1063 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
1064 ; SSE-NEXT: movdqa 128(%rdi), %xmm4
1065 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
1066 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1067 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1068 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[3,3,3,3]
1069 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1070 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,1,1]
1071 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1072 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,3,2,3]
1073 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1074 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
1075 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1076 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1077 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[3,3,3,3]
1078 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,1,1]
1079 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1080 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1081 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1082 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
1083 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1084 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
1085 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1086 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1087 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,3,3,3]
1088 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
1089 ; SSE-NEXT: movdqa %xmm3, %xmm9
1090 ; SSE-NEXT: movdqa %xmm3, (%rsp) # 16-byte Spill
1091 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1092 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
1093 ; SSE-NEXT: # xmm12 = mem[2,3,2,3]
1094 ; SSE-NEXT: movdqa %xmm15, %xmm3
1095 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1096 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm15[0],xmm12[1],xmm15[1]
1097 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm0[0],xmm12[1]
1098 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,3,3,3]
1099 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
1100 ; SSE-NEXT: movdqa %xmm4, %xmm7
1101 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1102 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1103 ; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm14[2,3,2,3]
1104 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
1105 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1106 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
1107 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
1108 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1109 ; SSE-NEXT: movdqa %xmm6, %xmm1
1110 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1111 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,2,3,3]
1112 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
1113 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm10[0,0,1,1]
1114 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
1115 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1]
1116 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,3,2,3]
1117 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
1118 ; SSE-NEXT: movdqa %xmm14, %xmm1
1119 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1120 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
1121 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
1122 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,0,1,1]
1123 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
1124 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
1125 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[2,3,2,3]
1126 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1127 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1128 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,2,3,3]
1129 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
1130 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[0,0,1,1]
1131 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1132 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
1133 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
1134 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1135 ; SSE-NEXT: movdqa %xmm7, %xmm1
1136 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1137 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,3,3]
1138 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
1139 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[0,0,1,1]
1140 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1141 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
1142 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
1143 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1144 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
1145 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1146 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1147 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
1148 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1]
1149 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
1150 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[1,1,1,1]
1151 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1152 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
1153 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1154 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1155 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
1156 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
1157 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm0[0],xmm8[1]
1158 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1159 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1160 ; SSE-NEXT: pshufd $255, (%rsp), %xmm1 # 16-byte Folded Reload
1161 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
1162 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1163 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1164 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
1165 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
1166 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
1167 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
1168 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1169 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
1170 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1171 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1172 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
1173 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1]
1174 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
1175 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1176 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
1177 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1178 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
1179 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1180 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
1181 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1182 ; SSE-NEXT: movaps %xmm0, (%rsi)
1183 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1184 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
1185 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1186 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
1187 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1188 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
1189 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1190 ; SSE-NEXT: movaps %xmm0, (%rdx)
1191 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1192 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
1193 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1194 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
1195 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1196 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
1197 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1198 ; SSE-NEXT: movaps %xmm0, (%rcx)
1199 ; SSE-NEXT: movapd %xmm15, 16(%r8)
1200 ; SSE-NEXT: movapd %xmm12, 32(%r8)
1201 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1202 ; SSE-NEXT: movaps %xmm0, 48(%r8)
1203 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1204 ; SSE-NEXT: movaps %xmm0, (%r8)
1205 ; SSE-NEXT: movapd %xmm2, 16(%r9)
1206 ; SSE-NEXT: movapd %xmm3, 32(%r9)
1207 ; SSE-NEXT: movapd %xmm4, 48(%r9)
1208 ; SSE-NEXT: movapd %xmm5, (%r9)
1209 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1210 ; SSE-NEXT: movapd %xmm13, 16(%rax)
1211 ; SSE-NEXT: movapd %xmm9, 32(%rax)
1212 ; SSE-NEXT: movapd %xmm8, 48(%rax)
1213 ; SSE-NEXT: movapd %xmm10, (%rax)
1214 ; SSE-NEXT: addq $408, %rsp # imm = 0x198
1215 ; SSE-NEXT: retq
1217 ; AVX1-ONLY-LABEL: load_i32_stride6_vf16:
1218 ; AVX1-ONLY: # %bb.0:
1219 ; AVX1-ONLY-NEXT: subq $328, %rsp # imm = 0x148
1220 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm12
1221 ; AVX1-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1222 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %ymm10
1223 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %ymm4
1224 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1225 ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %ymm1
1226 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1227 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm13
1228 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm6
1229 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1230 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm14
1231 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm3
1232 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm0
1233 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1234 ; AVX1-ONLY-NEXT: vinsertf128 $1, 96(%rdi), %ymm0, %ymm2
1235 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm3[2,0],ymm2[0,0],ymm3[6,4],ymm2[4,4]
1236 ; AVX1-ONLY-NEXT: vmovaps %ymm3, %ymm9
1237 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0],ymm2[2,2],ymm5[6,4],ymm2[6,6]
1238 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm14[0,1,2,3],ymm6[4,5],ymm14[6,7]
1239 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm6, %xmm7
1240 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm6[0,1],xmm7[2,3]
1241 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm11[0,2],xmm7[0,3]
1242 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm11[0,1,2],ymm5[3,4,5,6,7]
1243 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm13[2,3],ymm1[0,1]
1244 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm11 = ymm8[0],ymm13[0],ymm8[3],ymm13[2]
1245 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
1246 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm11[6,7]
1247 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1248 ; AVX1-ONLY-NEXT: vinsertf128 $1, 288(%rdi), %ymm4, %ymm1
1249 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm10[2,0],ymm1[0,0],ymm10[6,4],ymm1[4,4]
1250 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0],ymm1[2,2],ymm5[6,4],ymm1[6,6]
1251 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm15
1252 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm15[0,1,2,3],ymm12[4,5],ymm15[6,7]
1253 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm3, %xmm0
1254 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm3[0,1],xmm0[2,3]
1255 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm11[0,2],xmm0[0,3]
1256 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm11[0,1,2],ymm5[3,4,5,6,7]
1257 ; AVX1-ONLY-NEXT: vmovapd 352(%rdi), %ymm4
1258 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1259 ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %ymm12
1260 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm12[2,3],ymm4[0,1]
1261 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm11 = ymm4[0],ymm12[0],ymm4[3],ymm12[2]
1262 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
1263 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm11[6,7]
1264 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1265 ; AVX1-ONLY-NEXT: vmovups %ymm9, (%rsp) # 32-byte Spill
1266 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm9[3,0],ymm2[1,0],ymm9[7,4],ymm2[5,4]
1267 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm11[2,0],ymm2[2,3],ymm11[6,4],ymm2[6,7]
1268 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm6[1,0],xmm7[3,0]
1269 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[1,3]
1270 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2],ymm2[3,4,5,6,7]
1271 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm8[3,1],ymm13[1,3],ymm8[7,5],ymm13[5,7]
1272 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,1,2,0,4,5,6,4]
1273 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm6[6,7]
1274 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1275 ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1276 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm10[3,0],ymm1[1,0],ymm10[7,4],ymm1[5,4]
1277 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[2,3],ymm2[6,4],ymm1[6,7]
1278 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm3[1,0],xmm0[3,0]
1279 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
1280 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
1281 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[3,1],ymm12[1,3],ymm4[7,5],ymm12[5,7]
1282 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
1283 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1284 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1285 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm2 # 32-byte Folded Reload
1286 ; AVX1-ONLY-NEXT: # ymm2 = mem[0,1],ymm14[2,3],mem[4,5,6,7]
1287 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
1288 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm9[2,1],ymm7[2,0],ymm9[6,5],ymm7[6,4]
1289 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
1290 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm5
1291 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm2[2,0],xmm5[2,3]
1292 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7]
1293 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
1294 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm13[0,1,2,3],ymm9[4,5],ymm13[6,7]
1295 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm4[2,3,0,1]
1296 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm8[0,0],ymm4[2,0],ymm8[4,4],ymm4[6,4]
1297 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm3[5,6,7]
1298 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1299 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm11 # 32-byte Folded Reload
1300 ; AVX1-ONLY-NEXT: # ymm11 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
1301 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1302 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[2,1],ymm3[2,0],ymm10[6,5],ymm3[6,4]
1303 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
1304 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm11, %xmm15
1305 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm14 = xmm11[2,0],xmm15[2,3]
1306 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm14[0,1,2],ymm1[3,4,5,6,7]
1307 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1308 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm12[0,1,2,3],ymm10[4,5],ymm12[6,7]
1309 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm14[2,3,0,1]
1310 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm0[0,0],ymm14[2,0],ymm0[4,4],ymm14[6,4]
1311 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm6[5,6,7]
1312 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1313 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm8[0,1],ymm4[3,1],ymm8[4,5],ymm4[7,5]
1314 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,1],xmm5[3,3]
1315 ; AVX1-ONLY-NEXT: vmovaps %ymm7, %ymm1
1316 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm7 # 32-byte Reload
1317 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm7[3,1],ymm1[2,1],ymm7[7,5],ymm1[6,5]
1318 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,0,1]
1319 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3,4,5,6,7]
1320 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
1321 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1322 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm14[3,1],ymm0[4,5],ymm14[7,5]
1323 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm11[3,1],xmm15[3,3]
1324 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1325 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm4[3,1],ymm3[2,1],ymm4[7,5],ymm3[6,5]
1326 ; AVX1-ONLY-NEXT: vmovaps %ymm3, %ymm15
1327 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,0,1]
1328 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3,4,5,6,7]
1329 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
1330 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1331 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm13[2,3],ymm9[4,5,6,7]
1332 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm3
1333 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm3[2,2,3,3]
1334 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm5
1335 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3]
1336 ; AVX1-ONLY-NEXT: vmovapd 80(%rdi), %xmm6
1337 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm8 = ymm6[1],ymm1[0],ymm6[2],ymm1[2]
1338 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm7[0,1],ymm8[2,0],ymm7[4,5],ymm8[6,4]
1339 ; AVX1-ONLY-NEXT: vmovaps %ymm7, %ymm13
1340 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm8[2,3,4,5,6,7]
1341 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm9[2,3,0,1]
1342 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm9[2,0],ymm8[0,0],ymm9[6,4],ymm8[4,4]
1343 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm8[0,2],ymm11[2,0],ymm8[4,6],ymm11[6,4]
1344 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm11[5,6,7]
1345 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm10[0,1],ymm12[2,3],ymm10[4,5,6,7]
1346 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm12
1347 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm14 = xmm12[2,2,3,3]
1348 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm0
1349 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm14 = xmm0[0],xmm14[1],xmm0[2,3]
1350 ; AVX1-ONLY-NEXT: vmovapd 272(%rdi), %xmm1
1351 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm7 = ymm1[1],ymm15[0],ymm1[2],ymm15[2]
1352 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm7[2,0],ymm4[4,5],ymm7[6,4]
1353 ; AVX1-ONLY-NEXT: vmovaps %ymm4, %ymm10
1354 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm14[0,1],ymm7[2,3,4,5,6,7]
1355 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm11[2,3,0,1]
1356 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm11[2,0],ymm14[0,0],ymm11[6,4],ymm14[4,4]
1357 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm14[0,2],ymm4[2,0],ymm14[4,6],ymm4[6,4]
1358 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3,4],ymm4[5,6,7]
1359 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm9[3,0],ymm8[1,0],ymm9[7,4],ymm8[5,4]
1360 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,3],ymm7[2,0],ymm8[4,7],ymm7[6,4]
1361 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
1362 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm5 # 32-byte Folded Reload
1363 ; AVX1-ONLY-NEXT: # ymm5 = ymm6[3,1],mem[1,3],ymm6[7,5],mem[5,7]
1364 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm13[1,1],ymm5[2,0],ymm13[5,5],ymm5[6,4]
1365 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,3,2,3]
1366 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3,4,5,6,7]
1367 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm7[5,6,7]
1368 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm11[3,0],ymm14[1,0],ymm11[7,4],ymm14[5,4]
1369 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm14[0,3],ymm5[2,0],ymm14[4,7],ymm5[6,4]
1370 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm12[2,3]
1371 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,1],ymm15[1,3],ymm1[7,5],ymm15[5,7]
1372 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[1,1],ymm1[2,0],ymm10[5,5],ymm1[6,4]
1373 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3,2,3]
1374 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
1375 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
1376 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1377 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rsi)
1378 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1379 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rsi)
1380 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1381 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rdx)
1382 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1383 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rdx)
1384 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1385 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rcx)
1386 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1387 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rcx)
1388 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1389 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%r8)
1390 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1391 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%r8)
1392 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%r9)
1393 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%r9)
1394 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
1395 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
1396 ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rax)
1397 ; AVX1-ONLY-NEXT: addq $328, %rsp # imm = 0x148
1398 ; AVX1-ONLY-NEXT: vzeroupper
1399 ; AVX1-ONLY-NEXT: retq
1401 ; AVX2-SLOW-LABEL: load_i32_stride6_vf16:
1402 ; AVX2-SLOW: # %bb.0:
1403 ; AVX2-SLOW-NEXT: subq $232, %rsp
1404 ; AVX2-SLOW-NEXT: vmovaps 288(%rdi), %ymm9
1405 ; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %ymm11
1406 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %ymm3
1407 ; AVX2-SLOW-NEXT: vmovups %ymm3, (%rsp) # 32-byte Spill
1408 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm0
1409 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1410 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm2
1411 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1412 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm7
1413 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm1
1414 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1415 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm12
1416 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm10
1417 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm14 = <0,6,4,u>
1418 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm12[4,5],ymm1[6,7]
1419 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm14, %ymm1
1420 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm10[0,1],ymm7[0,1]
1421 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm7[6,7]
1422 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm13 = ymm8[0,2,2,2,4,6,6,6]
1423 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm13[3,4,5,6,7]
1424 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm0[4,5,6,7]
1425 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm0 = [4,2,4,2,4,2,4,2]
1426 ; AVX2-SLOW-NEXT: vpermps %ymm13, %ymm0, %ymm15
1427 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm15[6,7]
1428 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1429 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm3[0,1,2,3],ymm11[4,5],ymm3[6,7]
1430 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm14, %ymm14
1431 ; AVX2-SLOW-NEXT: vmovaps 256(%rdi), %ymm6
1432 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm6[0,1],ymm9[0,1]
1433 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1,2,3,4,5],ymm9[6,7]
1434 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm2[0,2,2,2,4,6,6,6]
1435 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm14[0,1,2],ymm15[3,4,5,6,7]
1436 ; AVX2-SLOW-NEXT: vmovaps 320(%rdi), %ymm14
1437 ; AVX2-SLOW-NEXT: vmovaps 352(%rdi), %ymm15
1438 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm14[4,5,6,7]
1439 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm0, %ymm0
1440 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
1441 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1442 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm0 = <1,7,5,u>
1443 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm0, %ymm3
1444 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm8[1,3,2,3,5,7,6,7]
1445 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3,4,5,6,7]
1446 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm5 = [5,3,5,3,5,3,5,3]
1447 ; AVX2-SLOW-NEXT: vpermps %ymm13, %ymm5, %ymm8
1448 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm8[6,7]
1449 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1450 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm0, %ymm0
1451 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,3,2,3,5,7,6,7]
1452 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
1453 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm5, %ymm1
1454 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1455 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1456 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm3
1457 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm3[2,3,2,3]
1458 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm12[0,0,2,3,4,4,6,7]
1459 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
1460 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1],ymm7[2,3],ymm10[4,5],ymm7[6,7]
1461 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
1462 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
1463 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
1464 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
1465 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
1466 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm13[0,0,0,0,4,4,4,4]
1467 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
1468 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm8[0,0,2,3,4,4,6,7]
1469 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
1470 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
1471 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
1472 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1473 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %xmm0
1474 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
1475 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm11[0,0,2,3,4,4,6,7]
1476 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
1477 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm9[2,3],ymm6[4,5],ymm9[6,7]
1478 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
1479 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
1480 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
1481 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
1482 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm15[0,0,0,0,4,4,4,4]
1483 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm14[0,0,2,3,4,4,6,7]
1484 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2],ymm2[3],ymm4[4,5,6],ymm2[7]
1485 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
1486 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
1487 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1488 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm3[3,3,3,3]
1489 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,1,3,3,4,5,7,7]
1490 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
1491 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm7[3,3,3,3,7,7,7,7]
1492 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm10[1],ymm2[2,3,4],ymm10[5],ymm2[6,7]
1493 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
1494 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
1495 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
1496 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm13[0,1,0,1,4,5,4,5]
1497 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm8[0,1,3,3,4,5,7,7]
1498 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
1499 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
1500 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
1501 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1502 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
1503 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
1504 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
1505 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm9[3,3,3,3,7,7,7,7]
1506 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
1507 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
1508 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
1509 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
1510 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm15[0,1,0,1,4,5,4,5]
1511 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm14[0,1,3,3,4,5,7,7]
1512 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
1513 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
1514 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm1[5,6,7]
1515 ; AVX2-SLOW-NEXT: vblendps $240, (%rsp), %ymm11, %ymm11 # 32-byte Folded Reload
1516 ; AVX2-SLOW-NEXT: # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
1517 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm9[4,5,6,7]
1518 ; AVX2-SLOW-NEXT: vmovaps 272(%rdi), %xmm2
1519 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
1520 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
1521 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm4 = [4,2,4,2]
1522 ; AVX2-SLOW-NEXT: # xmm4 = mem[0,0]
1523 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm4, %ymm5
1524 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3,4,5,6,7]
1525 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
1526 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm6 = [0,2,0,6,0,2,0,6]
1527 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,1,0,1]
1528 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm6, %ymm9
1529 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm9[5,6,7]
1530 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5,6,7]
1531 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm9 # 32-byte Folded Reload
1532 ; AVX2-SLOW-NEXT: # ymm9 = ymm12[0,1,2,3],mem[4,5,6,7]
1533 ; AVX2-SLOW-NEXT: vmovaps 80(%rdi), %xmm10
1534 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm7[0,1],ymm10[2,3],ymm7[4,5,6,7]
1535 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,1,0,2,4,5,4,6]
1536 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm4, %ymm4
1537 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm12[2,3,4,5,6,7]
1538 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm8[2,3],ymm13[4,5,6,7]
1539 ; AVX2-SLOW-NEXT: vpermps %ymm12, %ymm6, %ymm6
1540 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm6[5,6,7]
1541 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,1,1,5,5,5,5]
1542 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7]
1543 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm2 = [5,3,5,3]
1544 ; AVX2-SLOW-NEXT: # xmm2 = mem[0,0]
1545 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm2, %ymm6
1546 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3,4,5,6,7]
1547 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm6 = [0,3,1,7,0,3,1,7]
1548 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,1,0,1]
1549 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm6, %ymm5
1550 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm5[5,6,7]
1551 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm7[1,1,1,1,5,5,5,5]
1552 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm10[3],ymm5[4,5,6,7]
1553 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm2, %ymm2
1554 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3,4,5,6,7]
1555 ; AVX2-SLOW-NEXT: vpermps %ymm12, %ymm6, %ymm5
1556 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
1557 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1558 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 32(%rsi)
1559 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1560 ; AVX2-SLOW-NEXT: vmovaps %ymm5, (%rsi)
1561 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1562 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 32(%rdx)
1563 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1564 ; AVX2-SLOW-NEXT: vmovaps %ymm5, (%rdx)
1565 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1566 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 32(%rcx)
1567 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1568 ; AVX2-SLOW-NEXT: vmovaps %ymm5, (%rcx)
1569 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 32(%r8)
1570 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1571 ; AVX2-SLOW-NEXT: vmovaps %ymm3, (%r8)
1572 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%r9)
1573 ; AVX2-SLOW-NEXT: vmovaps %ymm4, (%r9)
1574 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1575 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%rax)
1576 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rax)
1577 ; AVX2-SLOW-NEXT: addq $232, %rsp
1578 ; AVX2-SLOW-NEXT: vzeroupper
1579 ; AVX2-SLOW-NEXT: retq
1581 ; AVX2-FAST-LABEL: load_i32_stride6_vf16:
1582 ; AVX2-FAST: # %bb.0:
1583 ; AVX2-FAST-NEXT: subq $200, %rsp
1584 ; AVX2-FAST-NEXT: vmovaps 288(%rdi), %ymm9
1585 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %ymm11
1586 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %ymm6
1587 ; AVX2-FAST-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1588 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm0
1589 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1590 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm2
1591 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1592 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm7
1593 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm3
1594 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1595 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm12
1596 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm8
1597 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm1 = <0,6,4,u>
1598 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm3[0,1,2,3],ymm12[4,5],ymm3[6,7]
1599 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm1, %ymm4
1600 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm8[0,1],ymm7[0,1]
1601 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm7[6,7]
1602 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm13 = ymm10[0,2,2,2,4,6,6,6]
1603 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm13[3,4,5,6,7]
1604 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm0[4,5,6,7]
1605 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm0 = [4,2,4,2,4,2,4,2]
1606 ; AVX2-FAST-NEXT: vpermps %ymm13, %ymm0, %ymm14
1607 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm14[6,7]
1608 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1609 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm11[4,5],ymm6[6,7]
1610 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm1, %ymm14
1611 ; AVX2-FAST-NEXT: vmovaps 256(%rdi), %ymm6
1612 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm6[0,1],ymm9[0,1]
1613 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1,2,3,4,5],ymm9[6,7]
1614 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm2[0,2,2,2,4,6,6,6]
1615 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm14[0,1,2],ymm15[3,4,5,6,7]
1616 ; AVX2-FAST-NEXT: vmovaps 320(%rdi), %ymm14
1617 ; AVX2-FAST-NEXT: vmovaps 352(%rdi), %ymm15
1618 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm14[4,5,6,7]
1619 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm0, %ymm0
1620 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
1621 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1622 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm0 = <1,7,5,u>
1623 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm0, %ymm3
1624 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm10[1,3,2,3,5,7,6,7]
1625 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3,4,5,6,7]
1626 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm5 = [5,3,5,3,5,3,5,3]
1627 ; AVX2-FAST-NEXT: vpermps %ymm13, %ymm5, %ymm10
1628 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm10[6,7]
1629 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1630 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm0, %ymm0
1631 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,3,2,3,5,7,6,7]
1632 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
1633 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm5, %ymm1
1634 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1635 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1636 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm0
1637 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
1638 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,0,2,3,4,4,6,7]
1639 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
1640 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm8[0,1],ymm7[2,3],ymm8[4,5],ymm7[6,7]
1641 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm3 = [2,0,6,4,2,0,6,7]
1642 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm3, %ymm2
1643 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
1644 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
1645 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1646 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,0,0,0,4,4,4,4]
1647 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
1648 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm13[0,0,2,3,4,4,6,7]
1649 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2],ymm2[3],ymm4[4,5,6],ymm2[7]
1650 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
1651 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
1652 ; AVX2-FAST-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
1653 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1],ymm9[2,3],ymm6[4,5],ymm9[6,7]
1654 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm3, %ymm1
1655 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %xmm2
1656 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm2[2,3,2,3]
1657 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm11[0,0,2,3,4,4,6,7]
1658 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3],ymm3[4],ymm4[5,6,7]
1659 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,3,2,3]
1660 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7]
1661 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm15[0,0,0,0,4,4,4,4]
1662 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm14[0,0,2,3,4,4,6,7]
1663 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3],ymm4[4,5,6],ymm3[7]
1664 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
1665 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
1666 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1667 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
1668 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm12[0,1,3,3,4,5,7,7]
1669 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
1670 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
1671 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3,4],ymm8[5],ymm1[6,7]
1672 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
1673 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
1674 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
1675 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,1,0,1,4,5,4,5]
1676 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm13[0,1,3,3,4,5,7,7]
1677 ; AVX2-FAST-NEXT: vmovaps %ymm13, %ymm4
1678 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7]
1679 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
1680 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm0[0,1,2,3,4],ymm1[5,6,7]
1681 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm2[3,3,3,3]
1682 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
1683 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
1684 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm9[3,3,3,3,7,7,7,7]
1685 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
1686 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
1687 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
1688 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
1689 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm15[0,1,0,1,4,5,4,5]
1690 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm14[0,1,3,3,4,5,7,7]
1691 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
1692 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
1693 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm1[5,6,7]
1694 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
1695 ; AVX2-FAST-NEXT: # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
1696 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm9[4,5,6,7]
1697 ; AVX2-FAST-NEXT: vmovaps 272(%rdi), %xmm2
1698 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
1699 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
1700 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm5 = [4,2,4,2]
1701 ; AVX2-FAST-NEXT: # xmm5 = mem[0,0]
1702 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm5, %ymm6
1703 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3,4,5,6,7]
1704 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
1705 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [0,2,0,6,0,2,0,6]
1706 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,0,1]
1707 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm9, %ymm14
1708 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5,6,7]
1709 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
1710 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm8 # 32-byte Folded Reload
1711 ; AVX2-FAST-NEXT: # ymm8 = ymm12[0,1,2,3],mem[4,5,6,7]
1712 ; AVX2-FAST-NEXT: vmovaps 80(%rdi), %xmm12
1713 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm7[0,1],ymm12[2,3],ymm7[4,5,6,7]
1714 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,0,2,4,5,4,6]
1715 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm5, %ymm5
1716 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm14[2,3,4,5,6,7]
1717 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm10[0,1],ymm4[2,3],ymm10[4,5,6,7]
1718 ; AVX2-FAST-NEXT: vpermps %ymm14, %ymm9, %ymm9
1719 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm9[5,6,7]
1720 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,1,1,5,5,5,5]
1721 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7]
1722 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm2 = [5,3,5,3]
1723 ; AVX2-FAST-NEXT: # xmm2 = mem[0,0]
1724 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm2, %ymm9
1725 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm1[2,3,4,5,6,7]
1726 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [0,3,1,7,0,3,1,7]
1727 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,0,1]
1728 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm9, %ymm6
1729 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm6[5,6,7]
1730 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm7[1,1,1,1,5,5,5,5]
1731 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm12[3],ymm6[4,5,6,7]
1732 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm2, %ymm2
1733 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm6[2,3,4,5,6,7]
1734 ; AVX2-FAST-NEXT: vpermps %ymm14, %ymm9, %ymm6
1735 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm6[5,6,7]
1736 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1737 ; AVX2-FAST-NEXT: vmovaps %ymm6, 32(%rsi)
1738 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1739 ; AVX2-FAST-NEXT: vmovaps %ymm6, (%rsi)
1740 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1741 ; AVX2-FAST-NEXT: vmovaps %ymm4, 32(%rdx)
1742 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1743 ; AVX2-FAST-NEXT: vmovaps %ymm6, (%rdx)
1744 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1745 ; AVX2-FAST-NEXT: vmovaps %ymm4, 32(%rcx)
1746 ; AVX2-FAST-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload
1747 ; AVX2-FAST-NEXT: vmovaps %ymm4, (%rcx)
1748 ; AVX2-FAST-NEXT: vmovaps %ymm3, 32(%r8)
1749 ; AVX2-FAST-NEXT: vmovaps %ymm13, (%r8)
1750 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%r9)
1751 ; AVX2-FAST-NEXT: vmovaps %ymm5, (%r9)
1752 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
1753 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rax)
1754 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rax)
1755 ; AVX2-FAST-NEXT: addq $200, %rsp
1756 ; AVX2-FAST-NEXT: vzeroupper
1757 ; AVX2-FAST-NEXT: retq
1759 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride6_vf16:
1760 ; AVX2-FAST-PERLANE: # %bb.0:
1761 ; AVX2-FAST-PERLANE-NEXT: subq $232, %rsp
1762 ; AVX2-FAST-PERLANE-NEXT: vmovaps 288(%rdi), %ymm9
1763 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %ymm11
1764 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %ymm3
1765 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, (%rsp) # 32-byte Spill
1766 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm0
1767 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1768 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm2
1769 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1770 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm7
1771 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm1
1772 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1773 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm12
1774 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm10
1775 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm14 = <0,6,4,u>
1776 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm12[4,5],ymm1[6,7]
1777 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm14, %ymm1
1778 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm10[0,1],ymm7[0,1]
1779 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm7[6,7]
1780 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm13 = ymm8[0,2,2,2,4,6,6,6]
1781 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm13[3,4,5,6,7]
1782 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm0[4,5,6,7]
1783 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm0 = [4,2,4,2,4,2,4,2]
1784 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm13, %ymm0, %ymm15
1785 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm15[6,7]
1786 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1787 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm3[0,1,2,3],ymm11[4,5],ymm3[6,7]
1788 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm14, %ymm14
1789 ; AVX2-FAST-PERLANE-NEXT: vmovaps 256(%rdi), %ymm6
1790 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm6[0,1],ymm9[0,1]
1791 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1,2,3,4,5],ymm9[6,7]
1792 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm2[0,2,2,2,4,6,6,6]
1793 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm14[0,1,2],ymm15[3,4,5,6,7]
1794 ; AVX2-FAST-PERLANE-NEXT: vmovaps 320(%rdi), %ymm14
1795 ; AVX2-FAST-PERLANE-NEXT: vmovaps 352(%rdi), %ymm15
1796 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm14[4,5,6,7]
1797 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm0, %ymm0
1798 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
1799 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1800 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm0 = <1,7,5,u>
1801 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm0, %ymm3
1802 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm8[1,3,2,3,5,7,6,7]
1803 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3,4,5,6,7]
1804 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm5 = [5,3,5,3,5,3,5,3]
1805 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm13, %ymm5, %ymm8
1806 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm8[6,7]
1807 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1808 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm0, %ymm0
1809 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,3,2,3,5,7,6,7]
1810 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
1811 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm5, %ymm1
1812 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1813 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1814 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm3
1815 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm3[2,3,2,3]
1816 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm12[0,0,2,3,4,4,6,7]
1817 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
1818 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1],ymm7[2,3],ymm10[4,5],ymm7[6,7]
1819 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
1820 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
1821 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
1822 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
1823 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
1824 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm13[0,0,0,0,4,4,4,4]
1825 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
1826 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm8[0,0,2,3,4,4,6,7]
1827 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
1828 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
1829 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
1830 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1831 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %xmm0
1832 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
1833 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm11[0,0,2,3,4,4,6,7]
1834 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
1835 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm9[2,3],ymm6[4,5],ymm9[6,7]
1836 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
1837 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
1838 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
1839 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
1840 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm15[0,0,0,0,4,4,4,4]
1841 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm14[0,0,2,3,4,4,6,7]
1842 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2],ymm2[3],ymm4[4,5,6],ymm2[7]
1843 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
1844 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
1845 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1846 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm3[3,3,3,3]
1847 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,1,3,3,4,5,7,7]
1848 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
1849 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm7[3,3,3,3,7,7,7,7]
1850 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm10[1],ymm2[2,3,4],ymm10[5],ymm2[6,7]
1851 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
1852 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
1853 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
1854 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm13[0,1,0,1,4,5,4,5]
1855 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm8[0,1,3,3,4,5,7,7]
1856 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
1857 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
1858 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
1859 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1860 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
1861 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
1862 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
1863 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm9[3,3,3,3,7,7,7,7]
1864 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
1865 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
1866 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
1867 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
1868 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm15[0,1,0,1,4,5,4,5]
1869 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm14[0,1,3,3,4,5,7,7]
1870 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
1871 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
1872 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm1[5,6,7]
1873 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, (%rsp), %ymm11, %ymm11 # 32-byte Folded Reload
1874 ; AVX2-FAST-PERLANE-NEXT: # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
1875 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm9[4,5,6,7]
1876 ; AVX2-FAST-PERLANE-NEXT: vmovaps 272(%rdi), %xmm2
1877 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
1878 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
1879 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm4 = [4,2,4,2]
1880 ; AVX2-FAST-PERLANE-NEXT: # xmm4 = mem[0,0]
1881 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm4, %ymm5
1882 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3,4,5,6,7]
1883 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
1884 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm6 = [0,2,0,6,0,2,0,6]
1885 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,1,0,1]
1886 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm6, %ymm9
1887 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm9[5,6,7]
1888 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5,6,7]
1889 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm9 # 32-byte Folded Reload
1890 ; AVX2-FAST-PERLANE-NEXT: # ymm9 = ymm12[0,1,2,3],mem[4,5,6,7]
1891 ; AVX2-FAST-PERLANE-NEXT: vmovaps 80(%rdi), %xmm10
1892 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm7[0,1],ymm10[2,3],ymm7[4,5,6,7]
1893 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,1,0,2,4,5,4,6]
1894 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm4, %ymm4
1895 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm12[2,3,4,5,6,7]
1896 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm8[2,3],ymm13[4,5,6,7]
1897 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm12, %ymm6, %ymm6
1898 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm6[5,6,7]
1899 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,1,1,5,5,5,5]
1900 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7]
1901 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm2 = [5,3,5,3]
1902 ; AVX2-FAST-PERLANE-NEXT: # xmm2 = mem[0,0]
1903 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm2, %ymm6
1904 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3,4,5,6,7]
1905 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm6 = [0,3,1,7,0,3,1,7]
1906 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,1,0,1]
1907 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm6, %ymm5
1908 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm5[5,6,7]
1909 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm7[1,1,1,1,5,5,5,5]
1910 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm10[3],ymm5[4,5,6,7]
1911 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm2, %ymm2
1912 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3,4,5,6,7]
1913 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm12, %ymm6, %ymm5
1914 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
1915 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1916 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 32(%rsi)
1917 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1918 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, (%rsi)
1919 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1920 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 32(%rdx)
1921 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1922 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, (%rdx)
1923 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1924 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 32(%rcx)
1925 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1926 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, (%rcx)
1927 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 32(%r8)
1928 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1929 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, (%r8)
1930 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%r9)
1931 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, (%r9)
1932 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1933 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%rax)
1934 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rax)
1935 ; AVX2-FAST-PERLANE-NEXT: addq $232, %rsp
1936 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
1937 ; AVX2-FAST-PERLANE-NEXT: retq
1938 ;
1939 ; AVX512F-LABEL: load_i32_stride6_vf16:
1940 ; AVX512F: # %bb.0:
1941 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
1942 ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm0
1943 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm1
1944 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm3
1945 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm4
1946 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm5
1947 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm6
1948 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,6,12,0,0,0,20,26,0,6,12,0,0,0,20,26]
1949 ; AVX512F-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1950 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm6, %zmm7
1951 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = <0,6,12,18,24,30,u,u>
1952 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm3, %zmm2
1953 ; AVX512F-NEXT: movb $56, %dil
1954 ; AVX512F-NEXT: kmovw %edi, %k2
1955 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm2 {%k2}
1956 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
1957 ; AVX512F-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1958 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
1959 ; AVX512F-NEXT: movw $-2048, %di # imm = 0xF800
1960 ; AVX512F-NEXT: kmovw %edi, %k1
1961 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm2 {%k1}
1962 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
1963 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1964 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm6, %zmm8
1965 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = <1,7,13,19,25,31,u,u>
1966 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm3, %zmm7
1967 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm7 {%k2}
1968 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
1969 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1970 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
1971 ; AVX512F-NEXT: vmovdqa32 %zmm8, %zmm7 {%k1}
1972 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = <2,8,14,20,26,u,u,u>
1973 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm3, %zmm8
1974 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [18,24,30,0,0,0,6,12,18,24,30,0,0,0,6,12]
1975 ; AVX512F-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
1976 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm5, %zmm9
1977 ; AVX512F-NEXT: movw $31, %di
1978 ; AVX512F-NEXT: kmovw %edi, %k2
1979 ; AVX512F-NEXT: vmovdqa32 %zmm8, %zmm9 {%k2}
1980 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
1981 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1982 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
1983 ; AVX512F-NEXT: vmovdqa32 %zmm8, %zmm9 {%k1}
1984 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = <3,9,15,21,27,u,u,u>
1985 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm3, %zmm8
1986 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [19,25,31,0,0,1,7,13,19,25,31,0,0,1,7,13]
1987 ; AVX512F-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
1988 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm5, %zmm10
1989 ; AVX512F-NEXT: vmovdqa32 %zmm8, %zmm10 {%k2}
1990 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
1991 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1992 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
1993 ; AVX512F-NEXT: vmovdqa32 %zmm8, %zmm10 {%k1}
1994 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
1995 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1996 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm5, %zmm8
1997 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm11 = <20,26,0,6,12,u,u,u>
1998 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm4, %zmm11
1999 ; AVX512F-NEXT: movw $992, %di # imm = 0x3E0
2000 ; AVX512F-NEXT: kmovw %edi, %k1
2001 ; AVX512F-NEXT: vmovdqa32 %zmm8, %zmm11 {%k1}
2002 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
2003 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
2004 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
2005 ; AVX512F-NEXT: movb $-32, %dil
2006 ; AVX512F-NEXT: kmovw %edi, %k2
2007 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm11 {%k2}
2008 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [21,27,0,0,0,3,9,15,21,27,0,0,0,3,9,15]
2009 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
2010 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm5, %zmm8
2011 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = <21,27,1,7,13,u,u,u>
2012 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm4, %zmm5
2013 ; AVX512F-NEXT: vmovdqa32 %zmm8, %zmm5 {%k1}
2014 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
2015 ; AVX512F-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
2016 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
2017 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm5 {%k2}
2018 ; AVX512F-NEXT: vmovdqa64 %zmm2, (%rsi)
2019 ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rdx)
2020 ; AVX512F-NEXT: vmovdqa64 %zmm9, (%rcx)
2021 ; AVX512F-NEXT: vmovdqa64 %zmm10, (%r8)
2022 ; AVX512F-NEXT: vmovdqa64 %zmm11, (%r9)
2023 ; AVX512F-NEXT: vmovdqa64 %zmm5, (%rax)
2024 ; AVX512F-NEXT: vzeroupper
2025 ; AVX512F-NEXT: retq
2026 ;
2027 ; AVX512BW-LABEL: load_i32_stride6_vf16:
2028 ; AVX512BW: # %bb.0:
2029 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
2030 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm0
2031 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm1
2032 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm3
2033 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm4
2034 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm5
2035 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm6
2036 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,6,12,0,0,0,20,26,0,6,12,0,0,0,20,26]
2037 ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
2038 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm6, %zmm7
2039 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = <0,6,12,18,24,30,u,u>
2040 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm3, %zmm2
2041 ; AVX512BW-NEXT: movb $56, %dil
2042 ; AVX512BW-NEXT: kmovd %edi, %k2
2043 ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm2 {%k2}
2044 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
2045 ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
2046 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm7
2047 ; AVX512BW-NEXT: movw $-2048, %di # imm = 0xF800
2048 ; AVX512BW-NEXT: kmovd %edi, %k1
2049 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm2 {%k1}
2050 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
2051 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
2052 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm6, %zmm8
2053 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm7 = <1,7,13,19,25,31,u,u>
2054 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm3, %zmm7
2055 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm7 {%k2}
2056 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
2057 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
2058 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
2059 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm7 {%k1}
2060 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = <2,8,14,20,26,u,u,u>
2061 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm3, %zmm8
2062 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [18,24,30,0,0,0,6,12,18,24,30,0,0,0,6,12]
2063 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
2064 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm5, %zmm9
2065 ; AVX512BW-NEXT: movw $31, %di
2066 ; AVX512BW-NEXT: kmovd %edi, %k2
2067 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm9 {%k2}
2068 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
2069 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
2070 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
2071 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm9 {%k1}
2072 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = <3,9,15,21,27,u,u,u>
2073 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm3, %zmm8
2074 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [19,25,31,0,0,1,7,13,19,25,31,0,0,1,7,13]
2075 ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
2076 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm5, %zmm10
2077 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm10 {%k2}
2078 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
2079 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
2080 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm8
2081 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm10 {%k1}
2082 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
2083 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
2084 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm5, %zmm8
2085 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm11 = <20,26,0,6,12,u,u,u>
2086 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm4, %zmm11
2087 ; AVX512BW-NEXT: movw $992, %di # imm = 0x3E0
2088 ; AVX512BW-NEXT: kmovd %edi, %k1
2089 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm11 {%k1}
2090 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
2091 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
2092 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
2093 ; AVX512BW-NEXT: movb $-32, %dil
2094 ; AVX512BW-NEXT: kmovd %edi, %k2
2095 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm11 {%k2}
2096 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [21,27,0,0,0,3,9,15,21,27,0,0,0,3,9,15]
2097 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
2098 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm5, %zmm8
2099 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm5 = <21,27,1,7,13,u,u,u>
2100 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm4, %zmm5
2101 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm5 {%k1}
2102 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
2103 ; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
2104 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
2105 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm5 {%k2}
2106 ; AVX512BW-NEXT: vmovdqa64 %zmm2, (%rsi)
2107 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rdx)
2108 ; AVX512BW-NEXT: vmovdqa64 %zmm9, (%rcx)
2109 ; AVX512BW-NEXT: vmovdqa64 %zmm10, (%r8)
2110 ; AVX512BW-NEXT: vmovdqa64 %zmm11, (%r9)
2111 ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rax)
2112 ; AVX512BW-NEXT: vzeroupper
2113 ; AVX512BW-NEXT: retq
2114 %wide.vec = load <96 x i32>, ptr %in.vec, align 64
2115 %strided.vec0 = shufflevector <96 x i32> %wide.vec, <96 x i32> poison, <16 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90>
2116 %strided.vec1 = shufflevector <96 x i32> %wide.vec, <96 x i32> poison, <16 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91>
2117 %strided.vec2 = shufflevector <96 x i32> %wide.vec, <96 x i32> poison, <16 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92>
2118 %strided.vec3 = shufflevector <96 x i32> %wide.vec, <96 x i32> poison, <16 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93>
2119 %strided.vec4 = shufflevector <96 x i32> %wide.vec, <96 x i32> poison, <16 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94>
2120 %strided.vec5 = shufflevector <96 x i32> %wide.vec, <96 x i32> poison, <16 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95>
2121 store <16 x i32> %strided.vec0, ptr %out.vec0, align 64
2122 store <16 x i32> %strided.vec1, ptr %out.vec1, align 64
2123 store <16 x i32> %strided.vec2, ptr %out.vec2, align 64
2124 store <16 x i32> %strided.vec3, ptr %out.vec3, align 64
2125 store <16 x i32> %strided.vec4, ptr %out.vec4, align 64
2126 store <16 x i32> %strided.vec5, ptr %out.vec5, align 64
2127 ret void
2128 }
2130 define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
2131 ; SSE-LABEL: load_i32_stride6_vf32:
2132 ; SSE: # %bb.0:
2133 ; SSE-NEXT: subq $1032, %rsp # imm = 0x408
2134 ; SSE-NEXT: movdqa 64(%rdi), %xmm5
2135 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2136 ; SSE-NEXT: movdqa (%rdi), %xmm12
2137 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2138 ; SSE-NEXT: movdqa 16(%rdi), %xmm13
2139 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2140 ; SSE-NEXT: movdqa 48(%rdi), %xmm9
2141 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2142 ; SSE-NEXT: movdqa 528(%rdi), %xmm7
2143 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2144 ; SSE-NEXT: movdqa 544(%rdi), %xmm3
2145 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2146 ; SSE-NEXT: movdqa 480(%rdi), %xmm8
2147 ; SSE-NEXT: movdqa 496(%rdi), %xmm4
2148 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2149 ; SSE-NEXT: movdqa 144(%rdi), %xmm10
2150 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2151 ; SSE-NEXT: movdqa 160(%rdi), %xmm2
2152 ; SSE-NEXT: movdqa 96(%rdi), %xmm6
2153 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2154 ; SSE-NEXT: movdqa 112(%rdi), %xmm1
2155 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
2156 ; SSE-NEXT: movdqa %xmm1, %xmm11
2157 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2158 ; SSE-NEXT: movdqa %xmm6, %xmm1
2159 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2160 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
2161 ; SSE-NEXT: movdqa %xmm2, %xmm6
2162 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2163 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[0,0,1,1]
2164 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2165 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
2166 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2167 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
2168 ; SSE-NEXT: movdqa %xmm8, %xmm1
2169 ; SSE-NEXT: movdqa %xmm8, %xmm4
2170 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2171 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2172 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
2173 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,0,1,1]
2174 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2175 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
2176 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2177 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,3,2,3]
2178 ; SSE-NEXT: movdqa %xmm12, %xmm1
2179 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2180 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
2181 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,0,1,1]
2182 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2183 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
2184 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2185 ; SSE-NEXT: movdqa 384(%rdi), %xmm2
2186 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2187 ; SSE-NEXT: movdqa 400(%rdi), %xmm14
2188 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,3,2,3]
2189 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2190 ; SSE-NEXT: movdqa %xmm2, %xmm1
2191 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2192 ; SSE-NEXT: movdqa 432(%rdi), %xmm3
2193 ; SSE-NEXT: movdqa %xmm3, (%rsp) # 16-byte Spill
2194 ; SSE-NEXT: movdqa 448(%rdi), %xmm9
2195 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,3,3]
2196 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2197 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,1,1]
2198 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2199 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
2200 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2201 ; SSE-NEXT: movdqa 288(%rdi), %xmm2
2202 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2203 ; SSE-NEXT: movdqa 304(%rdi), %xmm15
2204 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,3,2,3]
2205 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2206 ; SSE-NEXT: movdqa %xmm2, %xmm1
2207 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2208 ; SSE-NEXT: movdqa 336(%rdi), %xmm3
2209 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2210 ; SSE-NEXT: movdqa 352(%rdi), %xmm2
2211 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
2212 ; SSE-NEXT: movdqa %xmm2, %xmm12
2213 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2214 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,1,1]
2215 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2216 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
2217 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2218 ; SSE-NEXT: movdqa 672(%rdi), %xmm2
2219 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2220 ; SSE-NEXT: movdqa 688(%rdi), %xmm8
2221 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,3,2,3]
2222 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2223 ; SSE-NEXT: movdqa %xmm2, %xmm1
2224 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2225 ; SSE-NEXT: movdqa 720(%rdi), %xmm3
2226 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2227 ; SSE-NEXT: movdqa 736(%rdi), %xmm5
2228 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
2229 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2230 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,1,1]
2231 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2232 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
2233 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2234 ; SSE-NEXT: movdqa 192(%rdi), %xmm1
2235 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2236 ; SSE-NEXT: movdqa 208(%rdi), %xmm0
2237 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2238 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
2239 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2240 ; SSE-NEXT: movdqa 240(%rdi), %xmm2
2241 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2242 ; SSE-NEXT: movdqa 256(%rdi), %xmm0
2243 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2244 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
2245 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
2246 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2247 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
2248 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2249 ; SSE-NEXT: movdqa 576(%rdi), %xmm7
2250 ; SSE-NEXT: movdqa 592(%rdi), %xmm13
2251 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,3,2,3]
2252 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2253 ; SSE-NEXT: movdqa %xmm7, %xmm1
2254 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2255 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2256 ; SSE-NEXT: movdqa 624(%rdi), %xmm10
2257 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2258 ; SSE-NEXT: movdqa 640(%rdi), %xmm3
2259 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
2260 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2261 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[0,0,1,1]
2262 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2263 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
2264 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2265 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2266 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2267 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[3,3,3,3]
2268 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2269 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,3,2,3]
2270 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2271 ; SSE-NEXT: movdqa %xmm10, %xmm2
2272 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2273 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
2274 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2275 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
2276 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2277 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2278 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2279 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2280 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2281 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
2282 ; SSE-NEXT: movdqa %xmm11, %xmm2
2283 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2284 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
2285 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2286 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2287 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
2288 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2289 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2290 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2291 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2292 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2293 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2294 ; SSE-NEXT: movdqa %xmm4, %xmm2
2295 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2296 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
2297 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2298 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2299 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2300 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[3,3,3,3]
2301 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2302 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[2,3,2,3]
2303 ; SSE-NEXT: movdqa (%rsp), %xmm2 # 16-byte Reload
2304 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2305 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
2306 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2307 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2308 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
2309 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[3,3,3,3]
2310 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2311 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,3,2,3]
2312 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2313 ; SSE-NEXT: movdqa %xmm15, %xmm2
2314 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2315 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
2316 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2317 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2318 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2319 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[3,3,3,3]
2320 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2321 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
2322 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2323 ; SSE-NEXT: movdqa %xmm8, %xmm2
2324 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2325 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
2326 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2327 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2328 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,1,1]
2329 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2330 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2331 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2332 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2333 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2334 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2335 ; SSE-NEXT: movdqa %xmm14, %xmm5
2336 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
2337 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
2338 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2339 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
2340 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[3,3,3,3]
2341 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2342 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
2343 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2344 ; SSE-NEXT: movdqa %xmm5, %xmm3
2345 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2346 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
2347 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2348 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[2,2,3,3]
2349 ; SSE-NEXT: movdqa 176(%rdi), %xmm0
2350 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2351 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
2352 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2353 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2354 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,2,3]
2355 ; SSE-NEXT: movdqa 128(%rdi), %xmm2
2356 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2357 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2358 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2359 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2360 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,2,3,3]
2361 ; SSE-NEXT: movdqa 80(%rdi), %xmm0
2362 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2363 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
2364 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2365 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
2366 ; SSE-NEXT: movdqa 32(%rdi), %xmm13
2367 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
2368 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2369 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2370 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,2,3,3]
2371 ; SSE-NEXT: movdqa 368(%rdi), %xmm0
2372 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2373 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
2374 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2375 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,3,2,3]
2376 ; SSE-NEXT: movdqa 320(%rdi), %xmm2
2377 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2378 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2379 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2380 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2381 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,2,3,3]
2382 ; SSE-NEXT: movdqa 272(%rdi), %xmm0
2383 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2384 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
2385 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2386 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,3,2,3]
2387 ; SSE-NEXT: movdqa %xmm12, %xmm14
2388 ; SSE-NEXT: movdqa 224(%rdi), %xmm6
2389 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
2390 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2391 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2392 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2393 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,2,3,3]
2394 ; SSE-NEXT: movdqa 560(%rdi), %xmm0
2395 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2396 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
2397 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2398 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2399 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
2400 ; SSE-NEXT: movdqa 512(%rdi), %xmm2
2401 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2402 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2403 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2404 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2405 ; SSE-NEXT: movdqa (%rsp), %xmm11 # 16-byte Reload
2406 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,2,3,3]
2407 ; SSE-NEXT: movdqa 464(%rdi), %xmm15
2408 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,0,1,1]
2409 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2410 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2411 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
2412 ; SSE-NEXT: movdqa 416(%rdi), %xmm4
2413 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
2414 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2415 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2416 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2417 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,2,3,3]
2418 ; SSE-NEXT: movdqa 752(%rdi), %xmm0
2419 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2420 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
2421 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2422 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2423 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
2424 ; SSE-NEXT: movdqa 704(%rdi), %xmm12
2425 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1]
2426 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2427 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2428 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2429 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,2,3,3]
2430 ; SSE-NEXT: movdqa 656(%rdi), %xmm0
2431 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2432 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
2433 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2434 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2435 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
2436 ; SSE-NEXT: movdqa 608(%rdi), %xmm5
2437 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
2438 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2439 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2440 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2441 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
2442 ; SSE-NEXT: movdqa %xmm13, %xmm9
2443 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2444 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[1,1,1,1]
2445 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2446 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2447 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2448 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2449 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1]
2450 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2451 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2452 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[3,3,3,3]
2453 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2454 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,1,1]
2455 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2456 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2457 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2458 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2459 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
2460 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2461 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2462 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[3,3,3,3]
2463 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,1,1]
2464 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2465 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2466 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2467 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2468 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1]
2469 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2470 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2471 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2472 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
2473 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2474 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[1,1,1,1]
2475 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2476 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2477 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2478 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2479 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
2480 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2481 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2482 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,3,3,3]
2483 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
2484 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2485 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,3,2,3]
2486 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2487 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1]
2488 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2489 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2490 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2491 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
2492 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2493 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,1,1]
2494 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2495 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2496 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2497 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2498 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
2499 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2500 ; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill
2501 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,3,3,3]
2502 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2503 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,1,1]
2504 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2505 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2506 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2507 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2508 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
2509 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2510 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2511 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2512 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
2513 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[1,1,1,1]
2514 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2515 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2516 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2517 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
2518 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
2519 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2520 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2521 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,3,2,3]
2522 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2523 ; SSE-NEXT: movdqa %xmm12, %xmm1
2524 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2525 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
2526 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
2527 ; SSE-NEXT: # xmm9 = mem[0,0,1,1]
2528 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
2529 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm1[0],xmm9[1]
2530 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2531 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,3,2,3]
2532 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2533 ; SSE-NEXT: movdqa %xmm13, %xmm1
2534 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2535 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
2536 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2537 ; SSE-NEXT: # xmm3 = mem[0,0,1,1]
2538 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2539 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
2540 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2541 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2542 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
2543 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2544 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2545 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
2546 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2547 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm14[0,0,1,1]
2548 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
2549 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm1[0],xmm9[1]
2550 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,2,3]
2551 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2552 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2553 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,2,3,3]
2554 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
2555 ; SSE-NEXT: # xmm8 = mem[0,0,1,1]
2556 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
2557 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
2558 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2559 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
2560 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2561 ; SSE-NEXT: movdqa %xmm10, %xmm1
2562 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2563 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,2,3,3]
2564 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
2565 ; SSE-NEXT: # xmm6 = mem[0,0,1,1]
2566 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
2567 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm1[0],xmm6[1]
2568 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
2569 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2570 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2571 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,3,3]
2572 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2573 ; SSE-NEXT: # xmm4 = mem[0,0,1,1]
2574 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
2575 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
2576 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
2577 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2578 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2579 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
2580 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2581 ; SSE-NEXT: # xmm3 = mem[0,0,1,1]
2582 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2583 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
2584 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2585 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
2586 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2587 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2588 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,3,3]
2589 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
2590 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
2591 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2592 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
2593 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,1,1]
2594 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2595 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2596 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2597 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2598 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2599 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2600 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
2601 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
2602 ; SSE-NEXT: movapd %xmm15, %xmm7
2603 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
2604 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2605 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2606 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2607 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2608 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2609 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2610 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
2611 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
2612 ; SSE-NEXT: movapd %xmm15, %xmm11
2613 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2614 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2615 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2616 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2617 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2618 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2619 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2620 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
2621 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
2622 ; SSE-NEXT: movapd %xmm14, %xmm12
2623 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2624 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2625 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2626 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2627 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2628 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2629 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2630 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2631 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1]
2632 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
2633 ; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2634 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
2635 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2636 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2637 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2638 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2639 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2640 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2641 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
2642 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
2643 ; SSE-NEXT: movapd %xmm15, %xmm10
2644 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2645 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2646 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2647 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2648 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2649 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2650 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2651 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2652 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
2653 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
2654 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2655 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2656 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2657 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
2658 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2659 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2660 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2661 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2662 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1]
2663 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
2664 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2665 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2666 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3]
2667 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2668 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2669 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2670 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2671 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
2672 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
2673 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2674 ; SSE-NEXT: movaps %xmm0, 96(%rsi)
2675 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2676 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
2677 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2678 ; SSE-NEXT: movaps %xmm0, 112(%rsi)
2679 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2680 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
2681 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2682 ; SSE-NEXT: movaps %xmm0, 64(%rsi)
2683 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2684 ; SSE-NEXT: movaps %xmm0, (%rsi)
2685 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2686 ; SSE-NEXT: movaps %xmm0, 80(%rsi)
2687 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2688 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
2689 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2690 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
2691 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2692 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
2693 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2694 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
2695 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2696 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
2697 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2698 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
2699 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2700 ; SSE-NEXT: movaps %xmm0, (%rdx)
2701 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2702 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
2703 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2704 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
2705 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2706 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
2707 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2708 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
2709 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2710 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
2711 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2712 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
2713 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2714 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
2715 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2716 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
2717 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2718 ; SSE-NEXT: movaps %xmm0, (%rcx)
2719 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2720 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
2721 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2722 ; SSE-NEXT: movaps %xmm0, 112(%r8)
2723 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2724 ; SSE-NEXT: movaps %xmm0, 96(%r8)
2725 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
2726 ; SSE-NEXT: movaps %xmm0, 80(%r8)
2727 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2728 ; SSE-NEXT: movaps %xmm0, 64(%r8)
2729 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2730 ; SSE-NEXT: movaps %xmm0, 48(%r8)
2731 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2732 ; SSE-NEXT: movaps %xmm0, 32(%r8)
2733 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2734 ; SSE-NEXT: movaps %xmm0, 16(%r8)
2735 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2736 ; SSE-NEXT: movaps %xmm0, (%r8)
2737 ; SSE-NEXT: movapd %xmm2, 112(%r9)
2738 ; SSE-NEXT: movapd %xmm3, 96(%r9)
2739 ; SSE-NEXT: movapd %xmm4, 80(%r9)
2740 ; SSE-NEXT: movapd %xmm6, 64(%r9)
2741 ; SSE-NEXT: movapd %xmm8, 48(%r9)
2742 ; SSE-NEXT: movapd %xmm9, 32(%r9)
2743 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2744 ; SSE-NEXT: movaps %xmm0, 16(%r9)
2745 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2746 ; SSE-NEXT: movaps %xmm0, (%r9)
2747 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2748 ; SSE-NEXT: movapd %xmm14, 112(%rax)
2749 ; SSE-NEXT: movapd %xmm13, 96(%rax)
2750 ; SSE-NEXT: movapd %xmm15, 80(%rax)
2751 ; SSE-NEXT: movapd %xmm10, 64(%rax)
2752 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2753 ; SSE-NEXT: movaps %xmm0, 48(%rax)
2754 ; SSE-NEXT: movapd %xmm12, 32(%rax)
2755 ; SSE-NEXT: movapd %xmm11, 16(%rax)
2756 ; SSE-NEXT: movapd %xmm7, (%rax)
2757 ; SSE-NEXT: addq $1032, %rsp # imm = 0x408
2758 ; SSE-NEXT: retq
2759 ;
2760 ; AVX1-ONLY-LABEL: load_i32_stride6_vf32:
2761 ; AVX1-ONLY: # %bb.0:
2762 ; AVX1-ONLY-NEXT: subq $1032, %rsp # imm = 0x408
2763 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm9
2764 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2765 ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm4
2766 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2767 ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm5
2768 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2769 ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %ymm2
2770 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2771 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm3
2772 ; AVX1-ONLY-NEXT: vmovupd %ymm3, (%rsp) # 32-byte Spill
2773 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm6
2774 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2775 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm7
2776 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2777 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm1
2778 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2779 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm0
2780 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2781 ; AVX1-ONLY-NEXT: vinsertf128 $1, 96(%rdi), %ymm0, %ymm8
2782 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm8[0,0],ymm1[6,4],ymm8[4,4]
2783 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm8[2,2],ymm0[6,4],ymm8[6,6]
2784 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
2785 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm12, %xmm7
2786 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm12[0,1],xmm7[2,3]
2787 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[0,3]
2788 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2789 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm2[0,1]
2790 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2791 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[3],ymm3[2]
2792 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
2793 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2794 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2795 ; AVX1-ONLY-NEXT: vinsertf128 $1, 480(%rdi), %ymm5, %ymm6
2796 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm4[2,0],ymm6[0,0],ymm4[6,4],ymm6[4,4]
2797 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm6[2,2],ymm0[6,4],ymm6[6,6]
2798 ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm1
2799 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2800 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm9[4,5],ymm1[6,7]
2801 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm13, %xmm5
2802 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1],xmm5[2,3]
2803 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,3]
2804 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2805 ; AVX1-ONLY-NEXT: vmovapd 544(%rdi), %ymm1
2806 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2807 ; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %ymm2
2808 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2809 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
2810 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2811 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
2812 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
2813 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2814 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2815 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %ymm1
2816 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2817 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %ymm0
2818 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2819 ; AVX1-ONLY-NEXT: vinsertf128 $1, 288(%rdi), %ymm0, %ymm3
2820 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm3[0,0],ymm1[6,4],ymm3[4,4]
2821 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,2],ymm0[6,4],ymm3[6,6]
2822 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm1
2823 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2824 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm2
2825 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2826 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
2827 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm11, %xmm2
2828 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm11[0,1],xmm2[2,3]
2829 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,3]
2830 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2831 ; AVX1-ONLY-NEXT: vmovapd 352(%rdi), %ymm1
2832 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2833 ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %ymm4
2834 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2835 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm4[2,3],ymm1[0,1]
2836 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2837 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[3],ymm4[2]
2838 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
2839 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2840 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2841 ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %ymm1
2842 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2843 ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %ymm0
2844 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2845 ; AVX1-ONLY-NEXT: vinsertf128 $1, 672(%rdi), %ymm0, %ymm9
2846 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm9[0,0],ymm1[6,4],ymm9[4,4]
2847 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm0[2,0],ymm9[2,2],ymm0[6,4],ymm9[6,6]
2848 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %ymm0
2849 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2850 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %ymm1
2851 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2852 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
2853 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm4, %xmm1
2854 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm15 = xmm4[0,1],xmm1[2,3]
2855 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,2],xmm1[0,3]
2856 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2],ymm10[3,4,5,6,7]
2857 ; AVX1-ONLY-NEXT: vmovapd 736(%rdi), %ymm10
2858 ; AVX1-ONLY-NEXT: vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2859 ; AVX1-ONLY-NEXT: vmovapd 704(%rdi), %ymm0
2860 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2861 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm0[2,3],ymm10[0,1]
2862 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm14 = ymm10[0],ymm0[0],ymm10[3],ymm0[2]
2863 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
2864 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6,7]
2865 ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2866 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2867 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm15[3,0],ymm8[1,0],ymm15[7,4],ymm8[5,4]
2868 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm14[2,0],ymm8[2,3],ymm14[6,4],ymm8[6,7]
2869 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm12[1,0],xmm7[3,0]
2870 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm12[0,2],xmm7[1,3]
2871 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5,6,7]
2872 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm12 # 32-byte Reload
2873 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2874 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm0[3,1],ymm12[1,3],ymm0[7,5],ymm12[5,7]
2875 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
2876 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
2877 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2878 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2879 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm8[3,0],ymm6[1,0],ymm8[7,4],ymm6[5,4]
2880 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
2881 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm13[1,0],xmm5[3,0]
2882 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm7[0,2],xmm5[1,3]
2883 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3,4,5,6,7]
2884 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2885 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2886 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm0[3,1],ymm14[1,3],ymm0[7,5],ymm14[5,7]
2887 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,1,2,0,4,5,6,4]
2888 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
2889 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2890 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2891 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm0[3,0],ymm3[1,0],ymm0[7,4],ymm3[5,4]
2892 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm5[2,0],ymm3[2,3],ymm5[6,4],ymm3[6,7]
2893 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm11[1,0],xmm2[3,0]
2894 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm5[0,2],xmm2[1,3]
2895 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
2896 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2897 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
2898 ; AVX1-ONLY-NEXT: # ymm3 = ymm0[3,1],mem[1,3],ymm0[7,5],mem[5,7]
2899 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
2900 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
2901 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2902 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2903 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm11[3,0],ymm9[1,0],ymm11[7,4],ymm9[5,4]
2904 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm2[2,0],ymm9[2,3],ymm2[6,4],ymm9[6,7]
2905 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm4[1,0],xmm1[3,0]
2906 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm1[1,3]
2907 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
2908 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
2909 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[3,1],ymm13[1,3],ymm10[7,5],ymm13[5,7]
2910 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
2911 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2912 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2913 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2914 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
2915 ; AVX1-ONLY-NEXT: # ymm4 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
2916 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
2917 ; AVX1-ONLY-NEXT: # ymm1 = ymm15[2,1],mem[2,0],ymm15[6,5],mem[6,4]
2918 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
2919 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm4, %xmm0
2920 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2921 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm4[2,0],xmm0[2,3]
2922 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7]
2923 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm3 # 32-byte Folded Reload
2924 ; AVX1-ONLY-NEXT: # ymm3 = ymm12[0,1,2,3],mem[4,5],ymm12[6,7]
2925 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
2926 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2927 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm0[0,0],ymm3[2,0],ymm0[4,4],ymm3[6,4]
2928 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm5[5,6,7]
2929 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2930 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2931 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
2932 ; AVX1-ONLY-NEXT: # ymm6 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
2933 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload
2934 ; AVX1-ONLY-NEXT: # ymm5 = ymm8[2,1],mem[2,0],ymm8[6,5],mem[6,4]
2935 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,0,1]
2936 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm6, %xmm0
2937 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2938 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm6[2,0],xmm0[2,3]
2939 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3,4,5,6,7]
2940 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm2 # 32-byte Folded Reload
2941 ; AVX1-ONLY-NEXT: # ymm2 = ymm14[0,1,2,3],mem[4,5],ymm14[6,7]
2942 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1]
2943 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2944 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm0[0,0],ymm2[2,0],ymm0[4,4],ymm2[6,4]
2945 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm7[5,6,7]
2946 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2947 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2948 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
2949 ; AVX1-ONLY-NEXT: # ymm9 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
2950 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm7 # 32-byte Folded Reload
2951 ; AVX1-ONLY-NEXT: # ymm7 = ymm11[2,1],mem[2,0],ymm11[6,5],mem[6,4]
2952 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm7[2,3,0,1]
2953 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm9, %xmm0
2954 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2955 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm9[2,0],xmm0[2,3]
2956 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm11[0,1,2],ymm10[3,4,5,6,7]
2957 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm10 # 32-byte Folded Reload
2958 ; AVX1-ONLY-NEXT: # ymm10 = ymm13[0,1,2,3],mem[4,5],ymm13[6,7]
2959 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm10[2,3,0,1]
2960 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm7[0,0],ymm10[2,0],ymm7[4,4],ymm10[6,4]
2961 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0,1,2,3,4],ymm13[5,6,7]
2962 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2963 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2964 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
2965 ; AVX1-ONLY-NEXT: # ymm12 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
2966 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2967 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2968 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm5[2,1],ymm8[2,0],ymm5[6,5],ymm8[6,4]
2969 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm13[2,3,0,1]
2970 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm12, %xmm14
2971 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm15 = xmm12[2,0],xmm14[2,3]
2972 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
2973 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2974 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
2975 ; AVX1-ONLY-NEXT: # ymm15 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
2976 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3,0,1]
2977 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm0[0,0],ymm15[2,0],ymm0[4,4],ymm15[6,4]
2978 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3,4],ymm1[5,6,7]
2979 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2980 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2981 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1],ymm3[3,1],ymm1[4,5],ymm3[7,5]
2982 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm3 # 16-byte Folded Reload
2983 ; AVX1-ONLY-NEXT: # xmm3 = xmm4[3,1],mem[3,3]
2984 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
2985 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2986 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm11[3,1],ymm13[2,1],ymm11[7,5],ymm13[6,5]
2987 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,0,1]
2988 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5,6,7]
2989 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
2990 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2991 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2992 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1],ymm2[3,1],ymm1[4,5],ymm2[7,5]
2993 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm2 # 16-byte Folded Reload
2994 ; AVX1-ONLY-NEXT: # xmm2 = xmm6[3,1],mem[3,3]
2995 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2996 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2997 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,1],ymm6[2,1],ymm4[7,5],ymm6[6,5]
2998 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,0,1]
2999 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
3000 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
3001 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3002 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm15[3,1],ymm0[4,5],ymm15[7,5]
3003 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm12[3,1],xmm14[3,3]
3004 ; AVX1-ONLY-NEXT: vmovaps %ymm5, %ymm3
3005 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm5[3,1],ymm8[2,1],ymm5[7,5],ymm8[6,5]
3006 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
3007 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
3008 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
3009 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3010 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm7[0,1],ymm10[3,1],ymm7[4,5],ymm10[7,5]
3011 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm1 # 16-byte Folded Reload
3012 ; AVX1-ONLY-NEXT: # xmm1 = xmm9[3,1],mem[3,3]
3013 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
3014 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3015 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[3,1],ymm12[2,1],ymm2[7,5],ymm12[6,5]
3016 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
3017 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
3018 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
3019 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3020 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3021 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
3022 ; AVX1-ONLY-NEXT: # ymm2 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
3023 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm0
3024 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3025 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
3026 ; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm1
3027 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3028 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
3029 ; AVX1-ONLY-NEXT: vmovapd 464(%rdi), %xmm1
3030 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3031 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm6[0],ymm1[2],ymm6[2]
3032 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,0],ymm4[4,5],ymm1[6,4]
3033 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
3034 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3035 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm2[2,3,0,1]
3036 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm7[0,0],ymm2[6,4],ymm7[4,4]
3037 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,2],ymm1[2,0],ymm7[4,6],ymm1[6,4]
3038 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3039 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3040 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
3041 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
3042 ; AVX1-ONLY-NEXT: # ymm6 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
3043 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm0
3044 ; AVX1-ONLY-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
3045 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
3046 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm15
3047 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm15[0],xmm0[1],xmm15[2,3]
3048 ; AVX1-ONLY-NEXT: vmovapd 80(%rdi), %xmm10
3049 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm10[1],ymm13[0],ymm10[2],ymm13[2]
3050 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm11[0,1],ymm1[2,0],ymm11[4,5],ymm1[6,4]
3051 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
3052 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm6[2,3,0,1]
3053 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm6[2,0],ymm1[0,0],ymm6[6,4],ymm1[4,4]
3054 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm1[0,2],ymm2[2,0],ymm1[4,6],ymm2[6,4]
3055 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
3056 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3057 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3058 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
3059 ; AVX1-ONLY-NEXT: # ymm5 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
3060 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm0
3061 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3062 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
3063 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm13
3064 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm13[0],xmm0[1],xmm13[2,3]
3065 ; AVX1-ONLY-NEXT: vmovapd 272(%rdi), %xmm2
3066 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3067 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[1],ymm8[0],ymm2[2],ymm8[2]
3068 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,0],ymm3[4,5],ymm2[6,4]
3069 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
3070 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm5[2,3,0,1]
3071 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm5[2,0],ymm2[0,0],ymm5[6,4],ymm2[4,4]
3072 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm2[0,2],ymm3[2,0],ymm2[4,6],ymm3[6,4]
3073 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
3074 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3075 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3076 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
3077 ; AVX1-ONLY-NEXT: # ymm3 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
3078 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm11
3079 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm11[2,2,3,3]
3080 ; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm8
3081 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3]
3082 ; AVX1-ONLY-NEXT: vmovapd 656(%rdi), %xmm9
3083 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm14 = ymm9[1],ymm12[0],ymm9[2],ymm12[2]
3084 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3085 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm4[0,1],ymm14[2,0],ymm4[4,5],ymm14[6,4]
3086 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1],ymm14[2,3,4,5,6,7]
3087 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
3088 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm3[2,0],ymm0[0,0],ymm3[6,4],ymm0[4,4]
3089 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm0[0,2],ymm12[2,0],ymm0[4,6],ymm12[6,4]
3090 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm12[5,6,7]
3091 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,0],ymm1[1,0],ymm6[7,4],ymm1[5,4]
3092 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,3],ymm6[2,0],ymm1[4,7],ymm6[6,4]
3093 ; AVX1-ONLY-NEXT: vblendps $12, (%rsp), %xmm15, %xmm6 # 16-byte Folded Reload
3094 ; AVX1-ONLY-NEXT: # xmm6 = xmm15[0,1],mem[2,3]
3095 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
3096 ; AVX1-ONLY-NEXT: # ymm10 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
3097 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
3098 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm12[1,1],ymm10[2,0],ymm12[5,5],ymm10[6,4]
3099 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm6[1,3,2,3]
3100 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm10[2,3,4,5,6,7]
3101 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm1[5,6,7]
3102 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3103 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm7[1,0],ymm1[7,4],ymm7[5,4]
3104 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,3],ymm1[2,0],ymm7[4,7],ymm1[6,4]
3105 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
3106 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
3107 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[0,1],mem[2,3]
3108 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3109 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
3110 ; AVX1-ONLY-NEXT: # ymm10 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
3111 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
3112 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm12[1,1],ymm10[2,0],ymm12[5,5],ymm10[6,4]
3113 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,3,2,3]
3114 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm10[2,3,4,5,6,7]
3115 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm1[5,6,7]
3116 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[3,0],ymm2[1,0],ymm5[7,4],ymm2[5,4]
3117 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,3],ymm1[2,0],ymm2[4,7],ymm1[6,4]
3118 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm2 # 16-byte Folded Reload
3119 ; AVX1-ONLY-NEXT: # xmm2 = xmm13[0,1],mem[2,3]
3120 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3121 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
3122 ; AVX1-ONLY-NEXT: # ymm5 = ymm5[3,1],mem[1,3],ymm5[7,5],mem[5,7]
3123 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3124 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm10[1,1],ymm5[2,0],ymm10[5,5],ymm5[6,4]
3125 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,3,2,3]
3126 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3,4,5,6,7]
3127 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
3128 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm3[3,0],ymm0[1,0],ymm3[7,4],ymm0[5,4]
3129 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm2[2,0],ymm0[4,7],ymm2[6,4]
3130 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm8[0,1],xmm11[2,3]
3131 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm3 # 32-byte Folded Reload
3132 ; AVX1-ONLY-NEXT: # ymm3 = ymm9[3,1],mem[1,3],ymm9[7,5],mem[5,7]
3133 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[1,1],ymm3[2,0],ymm4[5,5],ymm3[6,4]
3134 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,3,2,3]
3135 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
3136 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
3137 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3138 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 96(%rsi)
3139 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3140 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rsi)
3141 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3142 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 64(%rsi)
3143 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3144 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rsi)
3145 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3146 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 96(%rdx)
3147 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3148 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rdx)
3149 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3150 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 64(%rdx)
3151 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3152 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rdx)
3153 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3154 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rcx)
3155 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3156 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 96(%rcx)
3157 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3158 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 64(%rcx)
3159 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3160 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rcx)
3161 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3162 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 96(%r8)
3163 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3164 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%r8)
3165 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3166 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 64(%r8)
3167 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3168 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%r8)
3169 ; AVX1-ONLY-NEXT: vmovaps %ymm14, 96(%r9)
3170 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3171 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%r9)
3172 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3173 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%r9)
3174 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3175 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 64(%r9)
3176 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
3177 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax)
3178 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rax)
3179 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 64(%rax)
3180 ; AVX1-ONLY-NEXT: vmovaps %ymm6, (%rax)
3181 ; AVX1-ONLY-NEXT: addq $1032, %rsp # imm = 0x408
3182 ; AVX1-ONLY-NEXT: vzeroupper
3183 ; AVX1-ONLY-NEXT: retq
3184 ;
3185 ; AVX2-SLOW-LABEL: load_i32_stride6_vf32:
3186 ; AVX2-SLOW: # %bb.0:
3187 ; AVX2-SLOW-NEXT: subq $1160, %rsp # imm = 0x488
3188 ; AVX2-SLOW-NEXT: vmovaps 480(%rdi), %ymm6
3189 ; AVX2-SLOW-NEXT: vmovaps 448(%rdi), %ymm10
3190 ; AVX2-SLOW-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3191 ; AVX2-SLOW-NEXT: vmovaps 416(%rdi), %ymm11
3192 ; AVX2-SLOW-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3193 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm2
3194 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3195 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm3
3196 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3197 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm4
3198 ; AVX2-SLOW-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3199 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
3200 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3201 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm1
3202 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3203 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm5
3204 ; AVX2-SLOW-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3205 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm12 = <0,6,4,u>
3206 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
3207 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm12, %ymm0
3208 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm5[0,1],ymm4[0,1]
3209 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3,4,5],ymm4[6,7]
3210 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,2,2,2,4,6,6,6]
3211 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3212 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3],ymm2[4,5,6,7]
3213 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm4 = [4,2,4,2,4,2,4,2]
3214 ; AVX2-SLOW-NEXT: vpermps %ymm8, %ymm4, %ymm2
3215 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
3216 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3217 ; AVX2-SLOW-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3218 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm10[0,1],ymm6[0,1]
3219 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm1[0,1,2,3,4,5],ymm6[6,7]
3220 ; AVX2-SLOW-NEXT: vmovaps 384(%rdi), %ymm0
3221 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3222 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
3223 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm12, %ymm1
3224 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,2,2,2,4,6,6,6]
3225 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
3226 ; AVX2-SLOW-NEXT: vmovaps 512(%rdi), %ymm0
3227 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3228 ; AVX2-SLOW-NEXT: vmovaps 544(%rdi), %ymm2
3229 ; AVX2-SLOW-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
3230 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5,6,7]
3231 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm4, %ymm2
3232 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
3233 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3234 ; AVX2-SLOW-NEXT: vmovaps 288(%rdi), %ymm0
3235 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3236 ; AVX2-SLOW-NEXT: vmovaps 256(%rdi), %ymm1
3237 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3238 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm0[0,1]
3239 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3240 ; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %ymm0
3241 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3242 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %ymm1
3243 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3244 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
3245 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm12, %ymm10
3246 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm3[0,2,2,2,4,6,6,6]
3247 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm15 = ymm10[0,1,2],ymm15[3,4,5,6,7]
3248 ; AVX2-SLOW-NEXT: vmovaps 320(%rdi), %ymm0
3249 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3250 ; AVX2-SLOW-NEXT: vmovaps 352(%rdi), %ymm1
3251 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3252 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3253 ; AVX2-SLOW-NEXT: vpermps %ymm10, %ymm4, %ymm14
3254 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3,4,5],ymm14[6,7]
3255 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3256 ; AVX2-SLOW-NEXT: vmovaps 608(%rdi), %ymm0
3257 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3258 ; AVX2-SLOW-NEXT: vmovaps 576(%rdi), %ymm1
3259 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3260 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
3261 ; AVX2-SLOW-NEXT: vpermps %ymm14, %ymm12, %ymm15
3262 ; AVX2-SLOW-NEXT: vmovaps 672(%rdi), %ymm0
3263 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3264 ; AVX2-SLOW-NEXT: vmovaps 640(%rdi), %ymm1
3265 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3266 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm1[0,1],ymm0[0,1]
3267 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm0[6,7]
3268 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm13 = ymm12[0,2,2,2,4,6,6,6]
3269 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
3270 ; AVX2-SLOW-NEXT: vmovaps 704(%rdi), %ymm0
3271 ; AVX2-SLOW-NEXT: vmovaps 736(%rdi), %ymm1
3272 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3273 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3274 ; AVX2-SLOW-NEXT: vmovaps %ymm0, %ymm15
3275 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3276 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm4, %ymm0
3277 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1,2,3,4,5],ymm0[6,7]
3278 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3279 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm0 = <1,7,5,u>
3280 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm0, %ymm9
3281 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,3,2,3,5,7,6,7]
3282 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2],ymm7[3,4,5,6,7]
3283 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm9 = [5,3,5,3,5,3,5,3]
3284 ; AVX2-SLOW-NEXT: vpermps %ymm8, %ymm9, %ymm8
3285 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
3286 ; AVX2-SLOW-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3287 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm0, %ymm5
3288 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,3,2,3,5,7,6,7]
3289 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3,4,5,6,7]
3290 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm9, %ymm4
3291 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
3292 ; AVX2-SLOW-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3293 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm0, %ymm2
3294 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm3[1,3,2,3,5,7,6,7]
3295 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
3296 ; AVX2-SLOW-NEXT: vpermps %ymm10, %ymm9, %ymm3
3297 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
3298 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3299 ; AVX2-SLOW-NEXT: vpermps %ymm14, %ymm0, %ymm0
3300 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm12[1,3,2,3,5,7,6,7]
3301 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
3302 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm9, %ymm1
3303 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
3304 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3305 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm1
3306 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm1[2,3,2,3]
3307 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3308 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,0,2,3,4,4,6,7]
3309 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6,7]
3310 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
3311 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
3312 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1],ymm12[2,3],ymm9[4,5],ymm12[6,7]
3313 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
3314 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
3315 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
3316 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
3317 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
3318 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm7[0,0,0,0,4,4,4,4]
3319 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
3320 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm8[0,0,2,3,4,4,6,7]
3321 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
3322 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
3323 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
3324 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3325 ; AVX2-SLOW-NEXT: vmovaps 384(%rdi), %xmm0
3326 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm0[2,3,2,3]
3327 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
3328 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,0,2,3,4,4,6,7]
3329 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3],ymm2[4],ymm3[5,6,7]
3330 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
3331 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
3332 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
3333 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
3334 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,2,0,3]
3335 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,3,2,3]
3336 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
3337 ; AVX2-SLOW-NEXT: vpermilps $0, (%rsp), %ymm3 # 32-byte Folded Reload
3338 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,0,0,0,4,4,4,4]
3339 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
3340 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,0,2,3,4,4,6,7]
3341 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3],ymm4[4,5,6],ymm3[7]
3342 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
3343 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
3344 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3345 ; AVX2-SLOW-NEXT: vmovaps 576(%rdi), %xmm2
3346 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3347 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm2[2,3,2,3]
3348 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
3349 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,0,2,3,4,4,6,7]
3350 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3],ymm3[4],ymm4[5,6,7]
3351 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3352 ; AVX2-SLOW-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
3353 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1],ymm2[2,3],mem[4,5],ymm2[6,7]
3354 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
3355 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,0,3]
3356 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,3,2,3]
3357 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5,6,7]
3358 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
3359 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,0,0,0,4,4,4,4]
3360 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm15[0,0,2,3,4,4,6,7]
3361 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5,6],ymm4[7]
3362 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
3363 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm4[5,6,7]
3364 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3365 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %xmm15
3366 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm15[2,3,2,3]
3367 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3368 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm2[0,0,2,3,4,4,6,7]
3369 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1,2,3],ymm4[4],ymm5[5,6,7]
3370 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
3371 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
3372 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm3[0,1],ymm11[2,3],ymm3[4,5],ymm11[6,7]
3373 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
3374 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,0,3]
3375 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,3,2,3]
3376 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3,4,5,6,7]
3377 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
3378 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,0,0,0,4,4,4,4]
3379 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
3380 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,0,2,3,4,4,6,7]
3381 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5,6],ymm5[7]
3382 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
3383 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5,6,7]
3384 ; AVX2-SLOW-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3385 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
3386 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm10[0,1,3,3,4,5,7,7]
3387 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1,2,3],ymm1[4],ymm4[5,6,7]
3388 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm12[3,3,3,3,7,7,7,7]
3389 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm9[1],ymm4[2,3,4],ymm9[5],ymm4[6,7]
3390 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,0,3]
3391 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
3392 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3,4,5,6,7]
3393 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm7[0,1,0,1,4,5,4,5]
3394 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm8[0,1,3,3,4,5,7,7]
3395 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5,6],ymm4[7]
3396 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
3397 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm4[5,6,7]
3398 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3399 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
3400 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
3401 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm8[0,1,3,3,4,5,7,7]
3402 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
3403 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm14[3,3,3,3,7,7,7,7]
3404 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
3405 ; AVX2-SLOW-NEXT: vmovaps %ymm13, %ymm14
3406 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
3407 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
3408 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3409 ; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm13 # 32-byte Reload
3410 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm13[0,1,0,1,4,5,4,5]
3411 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3412 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm10[0,1,3,3,4,5,7,7]
3413 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3],ymm4[4,5,6],ymm1[7]
3414 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
3415 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3416 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3417 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm15[3,3,3,3]
3418 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1,3,3,4,5,7,7]
3419 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
3420 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
3421 ; AVX2-SLOW-NEXT: vmovaps %ymm3, %ymm7
3422 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3,4],ymm3[5],ymm1[6,7]
3423 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
3424 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
3425 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3426 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
3427 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm9[0,1,0,1,4,5,4,5]
3428 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
3429 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm12[0,1,3,3,4,5,7,7]
3430 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7]
3431 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
3432 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3433 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3434 ; AVX2-SLOW-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3435 ; AVX2-SLOW-NEXT: # xmm0 = mem[3,3,3,3]
3436 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
3437 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,1,3,3,4,5,7,7]
3438 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
3439 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
3440 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm15[3,3,3,3,7,7,7,7]
3441 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3442 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
3443 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
3444 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
3445 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3446 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3447 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,1,0,1,4,5,4,5]
3448 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3449 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm5[0,1,3,3,4,5,7,7]
3450 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
3451 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
3452 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3453 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3454 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm2 # 32-byte Folded Reload
3455 ; AVX2-SLOW-NEXT: # ymm2 = ymm8[0,1,2,3],mem[4,5,6,7]
3456 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3457 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
3458 ; AVX2-SLOW-NEXT: # ymm1 = ymm14[0,1,2,3],mem[4,5,6,7]
3459 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3460 ; AVX2-SLOW-NEXT: vmovaps 464(%rdi), %xmm0
3461 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3462 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
3463 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
3464 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm11 = [4,2,4,2]
3465 ; AVX2-SLOW-NEXT: # xmm11 = mem[0,0]
3466 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm11, %ymm1
3467 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3,4,5,6,7]
3468 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm13[0,1],ymm10[2,3],ymm13[4,5,6,7]
3469 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,2,0,6,0,2,0,6]
3470 ; AVX2-SLOW-NEXT: # ymm0 = mem[0,1,0,1]
3471 ; AVX2-SLOW-NEXT: vpermps %ymm14, %ymm0, %ymm2
3472 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3473 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3474 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3475 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
3476 ; AVX2-SLOW-NEXT: # ymm8 = ymm1[0,1,2,3],mem[4,5,6,7]
3477 ; AVX2-SLOW-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3478 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm2 # 32-byte Folded Reload
3479 ; AVX2-SLOW-NEXT: # ymm2 = ymm7[0,1,2,3],mem[4,5,6,7]
3480 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3481 ; AVX2-SLOW-NEXT: vmovaps 272(%rdi), %xmm1
3482 ; AVX2-SLOW-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
3483 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
3484 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
3485 ; AVX2-SLOW-NEXT: vpermps %ymm8, %ymm11, %ymm2
3486 ; AVX2-SLOW-NEXT: vmovaps %ymm11, %ymm13
3487 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
3488 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm12[2,3],ymm9[4,5,6,7]
3489 ; AVX2-SLOW-NEXT: vpermps %ymm7, %ymm0, %ymm2
3490 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3491 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3492 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
3493 ; AVX2-SLOW-NEXT: # ymm11 = ymm3[0,1,2,3],mem[4,5,6,7]
3494 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm15[4,5,6,7]
3495 ; AVX2-SLOW-NEXT: vmovaps 656(%rdi), %xmm12
3496 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm12[2,3],ymm8[4,5,6,7]
3497 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
3498 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm13, %ymm2
3499 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
3500 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm15 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
3501 ; AVX2-SLOW-NEXT: vpermps %ymm15, %ymm0, %ymm2
3502 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3503 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3504 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3505 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
3506 ; AVX2-SLOW-NEXT: # ymm9 = ymm1[0,1,2,3],mem[4,5,6,7]
3507 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3508 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
3509 ; AVX2-SLOW-NEXT: # ymm2 = ymm1[0,1,2,3],mem[4,5,6,7]
3510 ; AVX2-SLOW-NEXT: vmovaps 80(%rdi), %xmm1
3511 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm9[0,1],ymm1[2,3],ymm9[4,5,6,7]
3512 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,0,2,4,5,4,6]
3513 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm13, %ymm4
3514 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm10[2,3,4,5,6,7]
3515 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
3516 ; AVX2-SLOW-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm10 # 32-byte Folded Reload
3517 ; AVX2-SLOW-NEXT: # ymm10 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
3518 ; AVX2-SLOW-NEXT: vpermps %ymm10, %ymm0, %ymm0
3519 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
3520 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3521 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
3522 ; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
3523 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3524 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
3525 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm4 = [5,3,5,3]
3526 ; AVX2-SLOW-NEXT: # xmm4 = mem[0,0]
3527 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm13 # 32-byte Folded Reload
3528 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3,4,5,6,7]
3529 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm13 = [0,3,1,7,0,3,1,7]
3530 ; AVX2-SLOW-NEXT: # ymm13 = mem[0,1,0,1]
3531 ; AVX2-SLOW-NEXT: vpermps %ymm14, %ymm13, %ymm14
3532 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5,6,7]
3533 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
3534 ; AVX2-SLOW-NEXT: # ymm3 = mem[1,1,1,1,5,5,5,5]
3535 ; AVX2-SLOW-NEXT: vblendps $8, (%rsp), %ymm3, %ymm3 # 32-byte Folded Reload
3536 ; AVX2-SLOW-NEXT: # ymm3 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
3537 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
3538 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5,6,7]
3539 ; AVX2-SLOW-NEXT: vpermps %ymm7, %ymm13, %ymm5
3540 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
3541 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm8[1,1,1,1,5,5,5,5]
3542 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm12[3],ymm5[4,5,6,7]
3543 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm4, %ymm6
3544 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3,4,5,6,7]
3545 ; AVX2-SLOW-NEXT: vpermps %ymm15, %ymm13, %ymm6
3546 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
3547 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm9[1,1,1,1,5,5,5,5]
3548 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3],ymm6[4,5,6,7]
3549 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm4, %ymm2
3550 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
3551 ; AVX2-SLOW-NEXT: vpermps %ymm10, %ymm13, %ymm2
3552 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3553 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3554 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 96(%rsi)
3555 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3556 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 32(%rsi)
3557 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3558 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 64(%rsi)
3559 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3560 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rsi)
3561 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3562 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 96(%rdx)
3563 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3564 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 32(%rdx)
3565 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3566 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 64(%rdx)
3567 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3568 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rdx)
3569 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3570 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 32(%rcx)
3571 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3572 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 96(%rcx)
3573 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3574 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 64(%rcx)
3575 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3576 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rcx)
3577 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3578 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 96(%r8)
3579 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3580 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 32(%r8)
3581 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3582 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 64(%r8)
3583 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3584 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%r8)
3585 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3586 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 96(%r9)
3587 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3588 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 32(%r9)
3589 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3590 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%r9)
3591 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3592 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 64(%r9)
3593 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
3594 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 96(%rax)
3595 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 32(%rax)
3596 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rax)
3597 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rax)
3598 ; AVX2-SLOW-NEXT: addq $1160, %rsp # imm = 0x488
3599 ; AVX2-SLOW-NEXT: vzeroupper
3600 ; AVX2-SLOW-NEXT: retq
3601 ;
3602 ; AVX2-FAST-LABEL: load_i32_stride6_vf32:
3603 ; AVX2-FAST: # %bb.0:
3604 ; AVX2-FAST-NEXT: subq $1160, %rsp # imm = 0x488
3605 ; AVX2-FAST-NEXT: vmovaps 480(%rdi), %ymm6
3606 ; AVX2-FAST-NEXT: vmovaps 448(%rdi), %ymm10
3607 ; AVX2-FAST-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3608 ; AVX2-FAST-NEXT: vmovaps 416(%rdi), %ymm11
3609 ; AVX2-FAST-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3610 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm2
3611 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3612 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm3
3613 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3614 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm4
3615 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3616 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
3617 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3618 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm1
3619 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3620 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm5
3621 ; AVX2-FAST-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3622 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm12 = <0,6,4,u>
3623 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
3624 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm12, %ymm0
3625 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm5[0,1],ymm4[0,1]
3626 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3,4,5],ymm4[6,7]
3627 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,2,2,2,4,6,6,6]
3628 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3629 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3],ymm2[4,5,6,7]
3630 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm4 = [4,2,4,2,4,2,4,2]
3631 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm4, %ymm2
3632 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
3633 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3634 ; AVX2-FAST-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3635 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm10[0,1],ymm6[0,1]
3636 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm1[0,1,2,3,4,5],ymm6[6,7]
3637 ; AVX2-FAST-NEXT: vmovaps 384(%rdi), %ymm0
3638 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3639 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
3640 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm12, %ymm1
3641 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,2,2,2,4,6,6,6]
3642 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
3643 ; AVX2-FAST-NEXT: vmovaps 512(%rdi), %ymm0
3644 ; AVX2-FAST-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
3645 ; AVX2-FAST-NEXT: vmovaps 544(%rdi), %ymm2
3646 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3647 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5,6,7]
3648 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm4, %ymm2
3649 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
3650 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3651 ; AVX2-FAST-NEXT: vmovaps 288(%rdi), %ymm0
3652 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3653 ; AVX2-FAST-NEXT: vmovaps 256(%rdi), %ymm1
3654 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3655 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm0[0,1]
3656 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3657 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %ymm0
3658 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3659 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %ymm1
3660 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3661 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
3662 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm12, %ymm10
3663 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm3[0,2,2,2,4,6,6,6]
3664 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm10[0,1,2],ymm15[3,4,5,6,7]
3665 ; AVX2-FAST-NEXT: vmovaps 320(%rdi), %ymm1
3666 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3667 ; AVX2-FAST-NEXT: vmovaps 352(%rdi), %ymm0
3668 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3669 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3670 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm4, %ymm14
3671 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3,4,5],ymm14[6,7]
3672 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3673 ; AVX2-FAST-NEXT: vmovaps 608(%rdi), %ymm0
3674 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3675 ; AVX2-FAST-NEXT: vmovaps 576(%rdi), %ymm1
3676 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3677 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
3678 ; AVX2-FAST-NEXT: vpermps %ymm14, %ymm12, %ymm15
3679 ; AVX2-FAST-NEXT: vmovaps 672(%rdi), %ymm0
3680 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3681 ; AVX2-FAST-NEXT: vmovaps 640(%rdi), %ymm1
3682 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3683 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm1[0,1],ymm0[0,1]
3684 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm0[6,7]
3685 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm13 = ymm12[0,2,2,2,4,6,6,6]
3686 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
3687 ; AVX2-FAST-NEXT: vmovaps 704(%rdi), %ymm0
3688 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3689 ; AVX2-FAST-NEXT: vmovaps 736(%rdi), %ymm1
3690 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3691 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3692 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm4, %ymm0
3693 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1,2,3,4,5],ymm0[6,7]
3694 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3695 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm0 = <1,7,5,u>
3696 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm0, %ymm9
3697 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,3,2,3,5,7,6,7]
3698 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2],ymm7[3,4,5,6,7]
3699 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm9 = [5,3,5,3,5,3,5,3]
3700 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm9, %ymm8
3701 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
3702 ; AVX2-FAST-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3703 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm0, %ymm5
3704 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,3,2,3,5,7,6,7]
3705 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3,4,5,6,7]
3706 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm9, %ymm4
3707 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
3708 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3709 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm0, %ymm2
3710 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm3[1,3,2,3,5,7,6,7]
3711 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
3712 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm9, %ymm3
3713 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
3714 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3715 ; AVX2-FAST-NEXT: vpermps %ymm14, %ymm0, %ymm0
3716 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm12[1,3,2,3,5,7,6,7]
3717 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
3718 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
3719 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
3720 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3721 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm0
3722 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
3723 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3724 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,0,2,3,4,4,6,7]
3725 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
3726 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
3727 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
3728 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1],ymm12[2,3],ymm9[4,5],ymm12[6,7]
3729 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm3 = [2,0,6,4,2,0,6,7]
3730 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm3, %ymm2
3731 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
3732 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
3733 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
3734 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm7[0,0,0,0,4,4,4,4]
3735 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
3736 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm8[0,0,2,3,4,4,6,7]
3737 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2],ymm2[3],ymm4[4,5,6],ymm2[7]
3738 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
3739 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3740 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3741 ; AVX2-FAST-NEXT: vmovaps 384(%rdi), %xmm1
3742 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm1[2,3,2,3]
3743 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
3744 ; AVX2-FAST-NEXT: # ymm4 = mem[0,0,2,3,4,4,6,7]
3745 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1,2,3],ymm2[4],ymm4[5,6,7]
3746 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
3747 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
3748 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
3749 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm3, %ymm4
3750 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,3,2,3]
3751 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3,4,5,6,7]
3752 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
3753 ; AVX2-FAST-NEXT: # ymm4 = mem[0,0,0,0,4,4,4,4]
3754 ; AVX2-FAST-NEXT: vpermilps $224, (%rsp), %ymm5 # 32-byte Folded Reload
3755 ; AVX2-FAST-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
3756 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5,6],ymm4[7]
3757 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
3758 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
3759 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3760 ; AVX2-FAST-NEXT: vmovaps 576(%rdi), %xmm2
3761 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3762 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm4 = xmm2[2,3,2,3]
3763 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
3764 ; AVX2-FAST-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
3765 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1,2,3],ymm4[4],ymm5[5,6,7]
3766 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3767 ; AVX2-FAST-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
3768 ; AVX2-FAST-NEXT: # ymm5 = mem[0,1],ymm2[2,3],mem[4,5],ymm2[6,7]
3769 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm3, %ymm5
3770 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,3,2,3]
3771 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3,4,5,6,7]
3772 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
3773 ; AVX2-FAST-NEXT: # ymm5 = mem[0,0,0,0,4,4,4,4]
3774 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
3775 ; AVX2-FAST-NEXT: # ymm6 = mem[0,0,2,3,4,4,6,7]
3776 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5,6],ymm5[7]
3777 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
3778 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm5[5,6,7]
3779 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3780 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
3781 ; AVX2-FAST-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm4 # 32-byte Folded Reload
3782 ; AVX2-FAST-NEXT: # ymm4 = mem[0,1],ymm11[2,3],mem[4,5],ymm11[6,7]
3783 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm3, %ymm4
3784 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %xmm3
3785 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm5 = xmm3[2,3,2,3]
3786 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3787 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm2[0,0,2,3,4,4,6,7]
3788 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1,2,3],ymm5[4],ymm6[5,6,7]
3789 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,3,2,3]
3790 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3,4,5,6,7]
3791 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
3792 ; AVX2-FAST-NEXT: # ymm5 = mem[0,0,0,0,4,4,4,4]
3793 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
3794 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm15[0,0,2,3,4,4,6,7]
3795 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5,6],ymm5[7]
3796 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
3797 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5,6,7]
3798 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3799 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
3800 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm10[0,1,3,3,4,5,7,7]
3801 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5,6,7]
3802 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm12[3,3,3,3,7,7,7,7]
3803 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm9[1],ymm4[2,3,4],ymm9[5],ymm4[6,7]
3804 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,0,3]
3805 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
3806 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
3807 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm7[0,1,0,1,4,5,4,5]
3808 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm8[0,1,3,3,4,5,7,7]
3809 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5,6],ymm4[7]
3810 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
3811 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
3812 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3813 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm1[3,3,3,3]
3814 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
3815 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm8[0,1,3,3,4,5,7,7]
3816 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
3817 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm14[3,3,3,3,7,7,7,7]
3818 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
3819 ; AVX2-FAST-NEXT: vmovaps %ymm13, %ymm14
3820 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
3821 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
3822 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3823 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
3824 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm13[0,1,0,1,4,5,4,5]
3825 ; AVX2-FAST-NEXT: vmovups (%rsp), %ymm10 # 32-byte Reload
3826 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm10[0,1,3,3,4,5,7,7]
3827 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3],ymm4[4,5,6],ymm1[7]
3828 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
3829 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3830 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3831 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm3[3,3,3,3]
3832 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1,3,3,4,5,7,7]
3833 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
3834 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
3835 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
3836 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3,4],ymm7[5],ymm1[6,7]
3837 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
3838 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
3839 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3840 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
3841 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm9[0,1,0,1,4,5,4,5]
3842 ; AVX2-FAST-NEXT: vmovaps %ymm15, %ymm12
3843 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm15[0,1,3,3,4,5,7,7]
3844 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7]
3845 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
3846 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3847 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3848 ; AVX2-FAST-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3849 ; AVX2-FAST-NEXT: # xmm0 = mem[3,3,3,3]
3850 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
3851 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,1,3,3,4,5,7,7]
3852 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
3853 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
3854 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm15[3,3,3,3,7,7,7,7]
3855 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3856 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
3857 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
3858 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
3859 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3860 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3861 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,1,0,1,4,5,4,5]
3862 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3863 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm5[0,1,3,3,4,5,7,7]
3864 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
3865 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
3866 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
3867 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3868 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm2 # 32-byte Folded Reload
3869 ; AVX2-FAST-NEXT: # ymm2 = ymm8[0,1,2,3],mem[4,5,6,7]
3870 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3871 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
3872 ; AVX2-FAST-NEXT: # ymm1 = ymm14[0,1,2,3],mem[4,5,6,7]
3873 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3874 ; AVX2-FAST-NEXT: vmovaps 464(%rdi), %xmm0
3875 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3876 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
3877 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
3878 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm11 = [4,2,4,2]
3879 ; AVX2-FAST-NEXT: # xmm11 = mem[0,0]
3880 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm11, %ymm1
3881 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3,4,5,6,7]
3882 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm13[0,1],ymm10[2,3],ymm13[4,5,6,7]
3883 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,2,0,6,0,2,0,6]
3884 ; AVX2-FAST-NEXT: # ymm0 = mem[0,1,0,1]
3885 ; AVX2-FAST-NEXT: vpermps %ymm14, %ymm0, %ymm2
3886 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3887 ; AVX2-FAST-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
3888 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3889 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
3890 ; AVX2-FAST-NEXT: # ymm8 = ymm1[0,1,2,3],mem[4,5,6,7]
3891 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3892 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm2 # 32-byte Folded Reload
3893 ; AVX2-FAST-NEXT: # ymm2 = ymm7[0,1,2,3],mem[4,5,6,7]
3894 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3895 ; AVX2-FAST-NEXT: vmovaps 272(%rdi), %xmm1
3896 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3897 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
3898 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
3899 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm11, %ymm2
3900 ; AVX2-FAST-NEXT: vmovaps %ymm11, %ymm13
3901 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
3902 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm12[2,3],ymm9[4,5,6,7]
3903 ; AVX2-FAST-NEXT: vpermps %ymm7, %ymm0, %ymm2
3904 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3905 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3906 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
3907 ; AVX2-FAST-NEXT: # ymm11 = ymm3[0,1,2,3],mem[4,5,6,7]
3908 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm15[4,5,6,7]
3909 ; AVX2-FAST-NEXT: vmovaps 656(%rdi), %xmm12
3910 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm12[2,3],ymm8[4,5,6,7]
3911 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
3912 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm13, %ymm2
3913 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
3914 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
3915 ; AVX2-FAST-NEXT: vpermps %ymm15, %ymm0, %ymm2
3916 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3917 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3918 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3919 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
3920 ; AVX2-FAST-NEXT: # ymm9 = ymm1[0,1,2,3],mem[4,5,6,7]
3921 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3922 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
3923 ; AVX2-FAST-NEXT: # ymm2 = ymm1[0,1,2,3],mem[4,5,6,7]
3924 ; AVX2-FAST-NEXT: vmovaps 80(%rdi), %xmm1
3925 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm9[0,1],ymm1[2,3],ymm9[4,5,6,7]
3926 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,0,2,4,5,4,6]
3927 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm13, %ymm4
3928 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm10[2,3,4,5,6,7]
3929 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
3930 ; AVX2-FAST-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm10 # 32-byte Folded Reload
3931 ; AVX2-FAST-NEXT: # ymm10 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
3932 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm0, %ymm0
3933 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
3934 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3935 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
3936 ; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
3937 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3938 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
3939 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm4 = [5,3,5,3]
3940 ; AVX2-FAST-NEXT: # xmm4 = mem[0,0]
3941 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm13 # 32-byte Folded Reload
3942 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3,4,5,6,7]
3943 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm13 = [0,3,1,7,0,3,1,7]
3944 ; AVX2-FAST-NEXT: # ymm13 = mem[0,1,0,1]
3945 ; AVX2-FAST-NEXT: vpermps %ymm14, %ymm13, %ymm14
3946 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5,6,7]
3947 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
3948 ; AVX2-FAST-NEXT: # ymm3 = mem[1,1,1,1,5,5,5,5]
3949 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
3950 ; AVX2-FAST-NEXT: # ymm3 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
3951 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
3952 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5,6,7]
3953 ; AVX2-FAST-NEXT: vpermps %ymm7, %ymm13, %ymm5
3954 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
3955 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm8[1,1,1,1,5,5,5,5]
3956 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm12[3],ymm5[4,5,6,7]
3957 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm4, %ymm6
3958 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3,4,5,6,7]
3959 ; AVX2-FAST-NEXT: vpermps %ymm15, %ymm13, %ymm6
3960 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
3961 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm9[1,1,1,1,5,5,5,5]
3962 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3],ymm6[4,5,6,7]
3963 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm4, %ymm2
3964 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
3965 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm13, %ymm2
3966 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
3967 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3968 ; AVX2-FAST-NEXT: vmovaps %ymm2, 96(%rsi)
3969 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3970 ; AVX2-FAST-NEXT: vmovaps %ymm2, 32(%rsi)
3971 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3972 ; AVX2-FAST-NEXT: vmovaps %ymm2, 64(%rsi)
3973 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3974 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rsi)
3975 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3976 ; AVX2-FAST-NEXT: vmovaps %ymm2, 96(%rdx)
3977 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3978 ; AVX2-FAST-NEXT: vmovaps %ymm2, 32(%rdx)
3979 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3980 ; AVX2-FAST-NEXT: vmovaps %ymm2, 64(%rdx)
3981 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3982 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rdx)
3983 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3984 ; AVX2-FAST-NEXT: vmovaps %ymm2, 32(%rcx)
3985 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3986 ; AVX2-FAST-NEXT: vmovaps %ymm2, 96(%rcx)
3987 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3988 ; AVX2-FAST-NEXT: vmovaps %ymm2, 64(%rcx)
3989 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3990 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rcx)
3991 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3992 ; AVX2-FAST-NEXT: vmovaps %ymm2, 96(%r8)
3993 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3994 ; AVX2-FAST-NEXT: vmovaps %ymm2, 32(%r8)
3995 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3996 ; AVX2-FAST-NEXT: vmovaps %ymm2, 64(%r8)
3997 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3998 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%r8)
3999 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4000 ; AVX2-FAST-NEXT: vmovaps %ymm2, 96(%r9)
4001 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4002 ; AVX2-FAST-NEXT: vmovaps %ymm2, 32(%r9)
4003 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4004 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%r9)
4005 ; AVX2-FAST-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
4006 ; AVX2-FAST-NEXT: vmovaps %ymm2, 64(%r9)
4007 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
4008 ; AVX2-FAST-NEXT: vmovaps %ymm5, 96(%rax)
4009 ; AVX2-FAST-NEXT: vmovaps %ymm3, 32(%rax)
4010 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
4011 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%rax)
4012 ; AVX2-FAST-NEXT: addq $1160, %rsp # imm = 0x488
4013 ; AVX2-FAST-NEXT: vzeroupper
4014 ; AVX2-FAST-NEXT: retq
4015 ;
4016 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride6_vf32:
4017 ; AVX2-FAST-PERLANE: # %bb.0:
4018 ; AVX2-FAST-PERLANE-NEXT: subq $1160, %rsp # imm = 0x488
4019 ; AVX2-FAST-PERLANE-NEXT: vmovaps 480(%rdi), %ymm6
4020 ; AVX2-FAST-PERLANE-NEXT: vmovaps 448(%rdi), %ymm10
4021 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4022 ; AVX2-FAST-PERLANE-NEXT: vmovaps 416(%rdi), %ymm11
4023 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4024 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm2
4025 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4026 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm3
4027 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4028 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm4
4029 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4030 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
4031 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4032 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm1
4033 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4034 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm5
4035 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4036 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm12 = <0,6,4,u>
4037 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
4038 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm12, %ymm0
4039 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm5[0,1],ymm4[0,1]
4040 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3,4,5],ymm4[6,7]
4041 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,2,2,2,4,6,6,6]
4042 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4043 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3],ymm2[4,5,6,7]
4044 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm4 = [4,2,4,2,4,2,4,2]
4045 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm8, %ymm4, %ymm2
4046 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
4047 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4048 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4049 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm10[0,1],ymm6[0,1]
4050 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm1[0,1,2,3,4,5],ymm6[6,7]
4051 ; AVX2-FAST-PERLANE-NEXT: vmovaps 384(%rdi), %ymm0
4052 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4053 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
4054 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm12, %ymm1
4055 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,2,2,2,4,6,6,6]
4056 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
4057 ; AVX2-FAST-PERLANE-NEXT: vmovaps 512(%rdi), %ymm0
4058 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4059 ; AVX2-FAST-PERLANE-NEXT: vmovaps 544(%rdi), %ymm2
4060 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
4061 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5,6,7]
4062 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm4, %ymm2
4063 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
4064 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4065 ; AVX2-FAST-PERLANE-NEXT: vmovaps 288(%rdi), %ymm0
4066 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4067 ; AVX2-FAST-PERLANE-NEXT: vmovaps 256(%rdi), %ymm1
4068 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4069 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm0[0,1]
4070 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4,5],ymm0[6,7]
4071 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %ymm0
4072 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4073 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %ymm1
4074 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4075 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
4076 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm12, %ymm10
4077 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm3[0,2,2,2,4,6,6,6]
4078 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm15 = ymm10[0,1,2],ymm15[3,4,5,6,7]
4079 ; AVX2-FAST-PERLANE-NEXT: vmovaps 320(%rdi), %ymm0
4080 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4081 ; AVX2-FAST-PERLANE-NEXT: vmovaps 352(%rdi), %ymm1
4082 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4083 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm0[4,5,6,7]
4084 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm10, %ymm4, %ymm14
4085 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3,4,5],ymm14[6,7]
4086 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4087 ; AVX2-FAST-PERLANE-NEXT: vmovaps 608(%rdi), %ymm0
4088 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4089 ; AVX2-FAST-PERLANE-NEXT: vmovaps 576(%rdi), %ymm1
4090 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4091 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
4092 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm14, %ymm12, %ymm15
4093 ; AVX2-FAST-PERLANE-NEXT: vmovaps 672(%rdi), %ymm0
4094 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4095 ; AVX2-FAST-PERLANE-NEXT: vmovaps 640(%rdi), %ymm1
4096 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4097 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm1[0,1],ymm0[0,1]
4098 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm0[6,7]
4099 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm13 = ymm12[0,2,2,2,4,6,6,6]
4100 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
4101 ; AVX2-FAST-PERLANE-NEXT: vmovaps 704(%rdi), %ymm0
4102 ; AVX2-FAST-PERLANE-NEXT: vmovaps 736(%rdi), %ymm1
4103 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4104 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
4105 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, %ymm15
4106 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4107 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm4, %ymm0
4108 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1,2,3,4,5],ymm0[6,7]
4109 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4110 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm0 = <1,7,5,u>
4111 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm0, %ymm9
4112 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,3,2,3,5,7,6,7]
4113 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2],ymm7[3,4,5,6,7]
4114 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm9 = [5,3,5,3,5,3,5,3]
4115 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm8, %ymm9, %ymm8
4116 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
4117 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4118 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm0, %ymm5
4119 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,3,2,3,5,7,6,7]
4120 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3,4,5,6,7]
4121 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm9, %ymm4
4122 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
4123 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4124 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm0, %ymm2
4125 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm3[1,3,2,3,5,7,6,7]
4126 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
4127 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm10, %ymm9, %ymm3
4128 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
4129 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4130 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm14, %ymm0, %ymm0
4131 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm12[1,3,2,3,5,7,6,7]
4132 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
4133 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm9, %ymm1
4134 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
4135 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4136 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm1
4137 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm1[2,3,2,3]
4138 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4139 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,0,2,3,4,4,6,7]
4140 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6,7]
4141 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
4142 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4143 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1],ymm12[2,3],ymm9[4,5],ymm12[6,7]
4144 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
4145 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
4146 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
4147 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
4148 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
4149 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm7[0,0,0,0,4,4,4,4]
4150 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4151 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm8[0,0,2,3,4,4,6,7]
4152 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
4153 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
4154 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
4155 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4156 ; AVX2-FAST-PERLANE-NEXT: vmovaps 384(%rdi), %xmm0
4157 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm0[2,3,2,3]
4158 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
4159 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,0,2,3,4,4,6,7]
4160 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3],ymm2[4],ymm3[5,6,7]
4161 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
4162 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4163 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
4164 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
4165 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,2,0,3]
4166 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,3,2,3]
4167 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
4168 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, (%rsp), %ymm3 # 32-byte Folded Reload
4169 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,0,0,0,4,4,4,4]
4170 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
4171 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,0,2,3,4,4,6,7]
4172 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3],ymm4[4,5,6],ymm3[7]
4173 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
4174 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
4175 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4176 ; AVX2-FAST-PERLANE-NEXT: vmovaps 576(%rdi), %xmm2
4177 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4178 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm3 = xmm2[2,3,2,3]
4179 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
4180 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,0,2,3,4,4,6,7]
4181 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3],ymm3[4],ymm4[5,6,7]
4182 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4183 ; AVX2-FAST-PERLANE-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
4184 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,1],ymm2[2,3],mem[4,5],ymm2[6,7]
4185 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
4186 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,0,3]
4187 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,3,2,3]
4188 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5,6,7]
4189 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
4190 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,0,0,0,4,4,4,4]
4191 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm15[0,0,2,3,4,4,6,7]
4192 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5,6],ymm4[7]
4193 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
4194 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm4[5,6,7]
4195 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4196 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %xmm15
4197 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm15[2,3,2,3]
4198 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4199 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm2[0,0,2,3,4,4,6,7]
4200 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1,2,3],ymm4[4],ymm5[5,6,7]
4201 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4202 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4203 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm3[0,1],ymm11[2,3],ymm3[4,5],ymm11[6,7]
4204 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
4205 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,0,3]
4206 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,3,2,3]
4207 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3,4,5,6,7]
4208 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
4209 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,0,0,0,4,4,4,4]
4210 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
4211 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,0,2,3,4,4,6,7]
4212 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5,6],ymm5[7]
4213 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
4214 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5,6,7]
4215 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4216 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
4217 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm10[0,1,3,3,4,5,7,7]
4218 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1,2,3],ymm1[4],ymm4[5,6,7]
4219 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm12[3,3,3,3,7,7,7,7]
4220 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm9[1],ymm4[2,3,4],ymm9[5],ymm4[6,7]
4221 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,0,3]
4222 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
4223 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3,4,5,6,7]
4224 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm7[0,1,0,1,4,5,4,5]
4225 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm8[0,1,3,3,4,5,7,7]
4226 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5,6],ymm4[7]
4227 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
4228 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm4[5,6,7]
4229 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4230 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
4231 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4232 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm8[0,1,3,3,4,5,7,7]
4233 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
4234 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm14[3,3,3,3,7,7,7,7]
4235 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
4236 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm13, %ymm14
4237 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
4238 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
4239 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4240 ; AVX2-FAST-PERLANE-NEXT: vmovups (%rsp), %ymm13 # 32-byte Reload
4241 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm13[0,1,0,1,4,5,4,5]
4242 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4243 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm10[0,1,3,3,4,5,7,7]
4244 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3],ymm4[4,5,6],ymm1[7]
4245 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
4246 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
4247 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4248 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm15[3,3,3,3]
4249 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1,3,3,4,5,7,7]
4250 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
4251 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
4252 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, %ymm7
4253 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3,4],ymm3[5],ymm1[6,7]
4254 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
4255 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
4256 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4257 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
4258 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm9[0,1,0,1,4,5,4,5]
4259 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4260 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm12[0,1,3,3,4,5,7,7]
4261 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7]
4262 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
4263 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
4264 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4265 ; AVX2-FAST-PERLANE-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
4266 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = mem[3,3,3,3]
4267 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4268 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,1,3,3,4,5,7,7]
4269 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
4270 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4271 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm15[3,3,3,3,7,7,7,7]
4272 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
4273 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
4274 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
4275 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
4276 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4277 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4278 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,1,0,1,4,5,4,5]
4279 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
4280 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm5[0,1,3,3,4,5,7,7]
4281 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
4282 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
4283 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
4284 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4285 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm2 # 32-byte Folded Reload
4286 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm8[0,1,2,3],mem[4,5,6,7]
4287 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4288 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
4289 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm14[0,1,2,3],mem[4,5,6,7]
4290 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4291 ; AVX2-FAST-PERLANE-NEXT: vmovaps 464(%rdi), %xmm0
4292 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4293 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
4294 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
4295 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm11 = [4,2,4,2]
4296 ; AVX2-FAST-PERLANE-NEXT: # xmm11 = mem[0,0]
4297 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm11, %ymm1
4298 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3,4,5,6,7]
4299 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm13[0,1],ymm10[2,3],ymm13[4,5,6,7]
4300 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,2,0,6,0,2,0,6]
4301 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[0,1,0,1]
4302 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm14, %ymm0, %ymm2
4303 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
4304 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4305 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4306 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
4307 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = ymm1[0,1,2,3],mem[4,5,6,7]
4308 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4309 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm2 # 32-byte Folded Reload
4310 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm7[0,1,2,3],mem[4,5,6,7]
4311 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4312 ; AVX2-FAST-PERLANE-NEXT: vmovaps 272(%rdi), %xmm1
4313 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
4314 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
4315 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
4316 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm8, %ymm11, %ymm2
4317 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, %ymm13
4318 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
4319 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm12[2,3],ymm9[4,5,6,7]
4320 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm7, %ymm0, %ymm2
4321 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
4322 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4323 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
4324 ; AVX2-FAST-PERLANE-NEXT: # ymm11 = ymm3[0,1,2,3],mem[4,5,6,7]
4325 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm15[4,5,6,7]
4326 ; AVX2-FAST-PERLANE-NEXT: vmovaps 656(%rdi), %xmm12
4327 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm12[2,3],ymm8[4,5,6,7]
4328 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
4329 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm13, %ymm2
4330 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
4331 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm15 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
4332 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm15, %ymm0, %ymm2
4333 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
4334 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4335 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4336 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
4337 ; AVX2-FAST-PERLANE-NEXT: # ymm9 = ymm1[0,1,2,3],mem[4,5,6,7]
4338 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4339 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
4340 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm1[0,1,2,3],mem[4,5,6,7]
4341 ; AVX2-FAST-PERLANE-NEXT: vmovaps 80(%rdi), %xmm1
4342 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm9[0,1],ymm1[2,3],ymm9[4,5,6,7]
4343 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,0,2,4,5,4,6]
4344 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm13, %ymm4
4345 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm10[2,3,4,5,6,7]
4346 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4347 ; AVX2-FAST-PERLANE-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm10 # 32-byte Folded Reload
4348 ; AVX2-FAST-PERLANE-NEXT: # ymm10 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
4349 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm10, %ymm0, %ymm0
4350 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
4351 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4352 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
4353 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
4354 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4355 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
4356 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm4 = [5,3,5,3]
4357 ; AVX2-FAST-PERLANE-NEXT: # xmm4 = mem[0,0]
4358 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm13 # 32-byte Folded Reload
4359 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3,4,5,6,7]
4360 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm13 = [0,3,1,7,0,3,1,7]
4361 ; AVX2-FAST-PERLANE-NEXT: # ymm13 = mem[0,1,0,1]
4362 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm14, %ymm13, %ymm14
4363 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5,6,7]
4364 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
4365 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[1,1,1,1,5,5,5,5]
4366 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, (%rsp), %ymm3, %ymm3 # 32-byte Folded Reload
4367 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
4368 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
4369 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5,6,7]
4370 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm7, %ymm13, %ymm5
4371 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
4372 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm8[1,1,1,1,5,5,5,5]
4373 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm12[3],ymm5[4,5,6,7]
4374 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm4, %ymm6
4375 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3,4,5,6,7]
4376 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm15, %ymm13, %ymm6
4377 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
4378 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm9[1,1,1,1,5,5,5,5]
4379 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3],ymm6[4,5,6,7]
4380 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm4, %ymm2
4381 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
4382 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm10, %ymm13, %ymm2
4383 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
4384 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4385 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 96(%rsi)
4386 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4387 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 32(%rsi)
4388 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4389 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 64(%rsi)
4390 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4391 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rsi)
4392 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4393 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 96(%rdx)
4394 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4395 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 32(%rdx)
4396 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4397 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 64(%rdx)
4398 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4399 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rdx)
4400 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4401 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 32(%rcx)
4402 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4403 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 96(%rcx)
4404 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4405 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 64(%rcx)
4406 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4407 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rcx)
4408 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4409 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 96(%r8)
4410 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4411 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 32(%r8)
4412 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4413 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 64(%r8)
4414 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4415 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%r8)
4416 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4417 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 96(%r9)
4418 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4419 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 32(%r9)
4420 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4421 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%r9)
4422 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4423 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 64(%r9)
4424 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
4425 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 96(%rax)
4426 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 32(%rax)
4427 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rax)
4428 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rax)
4429 ; AVX2-FAST-PERLANE-NEXT: addq $1160, %rsp # imm = 0x488
4430 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
4431 ; AVX2-FAST-PERLANE-NEXT: retq
4432 ;
4433 ; AVX512F-LABEL: load_i32_stride6_vf32:
4434 ; AVX512F: # %bb.0:
4435 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
4436 ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm0
4437 ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm3
4438 ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm2
4439 ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm5
4440 ; AVX512F-NEXT: vmovdqa64 704(%rdi), %zmm6
4441 ; AVX512F-NEXT: vmovdqa64 640(%rdi), %zmm4
4442 ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm10
4443 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm9
4444 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm11
4445 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm1
4446 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm12
4447 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm13
4448 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [0,6,12,0,0,0,20,26,0,6,12,0,0,0,20,26]
4449 ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
4450 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm15
4451 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm14, %zmm15
4452 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = <0,6,12,18,24,30,u,u>
4453 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm8
4454 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm7, %zmm8
4455 ; AVX512F-NEXT: movb $56, %dil
4456 ; AVX512F-NEXT: kmovw %edi, %k2
4457 ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm8 {%k2}
4458 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
4459 ; AVX512F-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
4460 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm16
4461 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm15, %zmm16
4462 ; AVX512F-NEXT: movw $-2048, %di # imm = 0xF800
4463 ; AVX512F-NEXT: kmovw %edi, %k1
4464 ; AVX512F-NEXT: vmovdqa32 %zmm16, %zmm8 {%k1}
4465 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm15
4466 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm5, %zmm14
4467 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm3, %zmm7
4468 ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm7 {%k2}
4469 ; AVX512F-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
4470 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
4471 ; AVX512F-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
4472 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm17
4473 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm16, %zmm17
4474 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm14 = <1,7,13,19,25,31,u,u>
4475 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm15
4476 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm14, %zmm15
4477 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm15 {%k2}
4478 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm17 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
4479 ; AVX512F-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3]
4480 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm18
4481 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm17, %zmm18
4482 ; AVX512F-NEXT: vmovdqa32 %zmm18, %zmm15 {%k1}
4483 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm17
4484 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm5, %zmm16
4485 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm3, %zmm14
4486 ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm14 {%k2}
4487 ; AVX512F-NEXT: vmovdqa32 %zmm17, %zmm14 {%k1}
4488 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm18 = <2,8,14,20,26,u,u,u>
4489 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm19
4490 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm18, %zmm19
4491 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm17 = [18,24,30,0,0,0,6,12,18,24,30,0,0,0,6,12]
4492 ; AVX512F-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3]
4493 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm16
4494 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm17, %zmm16
4495 ; AVX512F-NEXT: movw $31, %di
4496 ; AVX512F-NEXT: kmovw %edi, %k2
4497 ; AVX512F-NEXT: vmovdqa32 %zmm19, %zmm16 {%k2}
4498 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
4499 ; AVX512F-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
4500 ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm20
4501 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm19, %zmm20
4502 ; AVX512F-NEXT: vmovdqa32 %zmm20, %zmm16 {%k1}
4503 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm6, %zmm19
4504 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm2, %zmm17
4505 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm3, %zmm18
4506 ; AVX512F-NEXT: vmovdqa32 %zmm18, %zmm17 {%k2}
4507 ; AVX512F-NEXT: vmovdqa32 %zmm19, %zmm17 {%k1}
4508 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm20 = <3,9,15,21,27,u,u,u>
4509 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm21
4510 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm20, %zmm21
4511 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [19,25,31,0,0,1,7,13,19,25,31,0,0,1,7,13]
4512 ; AVX512F-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
4513 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm18
4514 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
4515 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm18 {%k2}
4516 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
4517 ; AVX512F-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
4518 ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm22
4519 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm21, %zmm22
4520 ; AVX512F-NEXT: vmovdqa32 %zmm22, %zmm18 {%k1}
4521 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm6, %zmm21
4522 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm2, %zmm19
4523 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm3, %zmm20
4524 ; AVX512F-NEXT: vmovdqa32 %zmm20, %zmm19 {%k2}
4525 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm19 {%k1}
4526 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
4527 ; AVX512F-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
4528 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm21
4529 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm20, %zmm21
4530 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm22 = <20,26,0,6,12,u,u,u>
4531 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm23
4532 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm22, %zmm23
4533 ; AVX512F-NEXT: movw $992, %di # imm = 0x3E0
4534 ; AVX512F-NEXT: kmovw %edi, %k1
4535 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm23 {%k1}
4536 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
4537 ; AVX512F-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
4538 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm24
4539 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm21, %zmm24
4540 ; AVX512F-NEXT: movb $-32, %dil
4541 ; AVX512F-NEXT: kmovw %edi, %k2
4542 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm23 {%k2}
4543 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm21
4544 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm2, %zmm20
4545 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm0, %zmm22
4546 ; AVX512F-NEXT: vmovdqa32 %zmm20, %zmm22 {%k1}
4547 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm22 {%k2}
4548 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [21,27,0,0,0,3,9,15,21,27,0,0,0,3,9,15]
4549 ; AVX512F-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
4550 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm20, %zmm12
4551 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm13 = <21,27,1,7,13,u,u,u>
4552 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm13, %zmm1
4553 ; AVX512F-NEXT: vmovdqa32 %zmm12, %zmm1 {%k1}
4554 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
4555 ; AVX512F-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
4556 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm11, %zmm9
4557 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm1 {%k2}
4558 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm11, %zmm4
4559 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm20, %zmm2
4560 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm13, %zmm0
4561 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm0 {%k1}
4562 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm0 {%k2}
4563 ; AVX512F-NEXT: vmovdqa64 %zmm7, 64(%rsi)
4564 ; AVX512F-NEXT: vmovdqa64 %zmm8, (%rsi)
4565 ; AVX512F-NEXT: vmovdqa64 %zmm14, 64(%rdx)
4566 ; AVX512F-NEXT: vmovdqa64 %zmm15, (%rdx)
4567 ; AVX512F-NEXT: vmovdqa64 %zmm17, 64(%rcx)
4568 ; AVX512F-NEXT: vmovdqa64 %zmm16, (%rcx)
4569 ; AVX512F-NEXT: vmovdqa64 %zmm19, 64(%r8)
4570 ; AVX512F-NEXT: vmovdqa64 %zmm18, (%r8)
4571 ; AVX512F-NEXT: vmovdqa64 %zmm22, 64(%r9)
4572 ; AVX512F-NEXT: vmovdqa64 %zmm23, (%r9)
4573 ; AVX512F-NEXT: vmovdqa64 %zmm0, 64(%rax)
4574 ; AVX512F-NEXT: vmovdqa64 %zmm1, (%rax)
4575 ; AVX512F-NEXT: vzeroupper
4576 ; AVX512F-NEXT: retq
4577 ;
4578 ; AVX512BW-LABEL: load_i32_stride6_vf32:
4579 ; AVX512BW: # %bb.0:
4580 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
4581 ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm0
4582 ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm3
4583 ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm2
4584 ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm5
4585 ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm6
4586 ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm4
4587 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm10
4588 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm9
4589 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm11
4590 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
4591 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm12
4592 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm13
4593 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [0,6,12,0,0,0,20,26,0,6,12,0,0,0,20,26]
4594 ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
4595 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm15
4596 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm14, %zmm15
4597 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm7 = <0,6,12,18,24,30,u,u>
4598 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm8
4599 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm7, %zmm8
4600 ; AVX512BW-NEXT: movb $56, %dil
4601 ; AVX512BW-NEXT: kmovd %edi, %k2
4602 ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm8 {%k2}
4603 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
4604 ; AVX512BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
4605 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm16
4606 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm15, %zmm16
4607 ; AVX512BW-NEXT: movw $-2048, %di # imm = 0xF800
4608 ; AVX512BW-NEXT: kmovd %edi, %k1
4609 ; AVX512BW-NEXT: vmovdqa32 %zmm16, %zmm8 {%k1}
4610 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm15
4611 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm5, %zmm14
4612 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm7
4613 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm7 {%k2}
4614 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm7 {%k1}
4615 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
4616 ; AVX512BW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
4617 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm17
4618 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm16, %zmm17
4619 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm14 = <1,7,13,19,25,31,u,u>
4620 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm15
4621 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm14, %zmm15
4622 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm15 {%k2}
4623 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm17 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
4624 ; AVX512BW-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3]
4625 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm18
4626 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm17, %zmm18
4627 ; AVX512BW-NEXT: vmovdqa32 %zmm18, %zmm15 {%k1}
4628 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm17
4629 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm5, %zmm16
4630 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm14
4631 ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm14 {%k2}
4632 ; AVX512BW-NEXT: vmovdqa32 %zmm17, %zmm14 {%k1}
4633 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm18 = <2,8,14,20,26,u,u,u>
4634 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm19
4635 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm18, %zmm19
4636 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm17 = [18,24,30,0,0,0,6,12,18,24,30,0,0,0,6,12]
4637 ; AVX512BW-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3]
4638 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm16
4639 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm17, %zmm16
4640 ; AVX512BW-NEXT: movw $31, %di
4641 ; AVX512BW-NEXT: kmovd %edi, %k2
4642 ; AVX512BW-NEXT: vmovdqa32 %zmm19, %zmm16 {%k2}
4643 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
4644 ; AVX512BW-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
4645 ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm20
4646 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm19, %zmm20
4647 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm16 {%k1}
4648 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm6, %zmm19
4649 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm2, %zmm17
4650 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm18
4651 ; AVX512BW-NEXT: vmovdqa32 %zmm18, %zmm17 {%k2}
4652 ; AVX512BW-NEXT: vmovdqa32 %zmm19, %zmm17 {%k1}
4653 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = <3,9,15,21,27,u,u,u>
4654 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm21
4655 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm20, %zmm21
4656 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [19,25,31,0,0,1,7,13,19,25,31,0,0,1,7,13]
4657 ; AVX512BW-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
4658 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm18
4659 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
4660 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm18 {%k2}
4661 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
4662 ; AVX512BW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
4663 ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm22
4664 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm21, %zmm22
4665 ; AVX512BW-NEXT: vmovdqa32 %zmm22, %zmm18 {%k1}
4666 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm6, %zmm21
4667 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm2, %zmm19
4668 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm20
4669 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm19 {%k2}
4670 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm19 {%k1}
4671 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
4672 ; AVX512BW-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
4673 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm21
4674 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm20, %zmm21
4675 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm22 = <20,26,0,6,12,u,u,u>
4676 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm23
4677 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm22, %zmm23
4678 ; AVX512BW-NEXT: movw $992, %di # imm = 0x3E0
4679 ; AVX512BW-NEXT: kmovd %edi, %k1
4680 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm23 {%k1}
4681 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
4682 ; AVX512BW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
4683 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm24
4684 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm21, %zmm24
4685 ; AVX512BW-NEXT: movb $-32, %dil
4686 ; AVX512BW-NEXT: kmovd %edi, %k2
4687 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm23 {%k2}
4688 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm21
4689 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm2, %zmm20
4690 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm0, %zmm22
4691 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm22 {%k1}
4692 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm22 {%k2}
4693 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [21,27,0,0,0,3,9,15,21,27,0,0,0,3,9,15]
4694 ; AVX512BW-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
4695 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm20, %zmm12
4696 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm13 = <21,27,1,7,13,u,u,u>
4697 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm13, %zmm1
4698 ; AVX512BW-NEXT: vmovdqa32 %zmm12, %zmm1 {%k1}
4699 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
4700 ; AVX512BW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
4701 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm11, %zmm9
4702 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm1 {%k2}
4703 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm11, %zmm4
4704 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm20, %zmm2
4705 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm13, %zmm0
4706 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm0 {%k1}
4707 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k2}
4708 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 64(%rsi)
4709 ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%rsi)
4710 ; AVX512BW-NEXT: vmovdqa64 %zmm14, 64(%rdx)
4711 ; AVX512BW-NEXT: vmovdqa64 %zmm15, (%rdx)
4712 ; AVX512BW-NEXT: vmovdqa64 %zmm17, 64(%rcx)
4713 ; AVX512BW-NEXT: vmovdqa64 %zmm16, (%rcx)
4714 ; AVX512BW-NEXT: vmovdqa64 %zmm19, 64(%r8)
4715 ; AVX512BW-NEXT: vmovdqa64 %zmm18, (%r8)
4716 ; AVX512BW-NEXT: vmovdqa64 %zmm22, 64(%r9)
4717 ; AVX512BW-NEXT: vmovdqa64 %zmm23, (%r9)
4718 ; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
4719 ; AVX512BW-NEXT: vmovdqa64 %zmm1, (%rax)
4720 ; AVX512BW-NEXT: vzeroupper
4721 ; AVX512BW-NEXT: retq
4722 %wide.vec = load <192 x i32>, ptr %in.vec, align 64
4723 %strided.vec0 = shufflevector <192 x i32> %wide.vec, <192 x i32> poison, <32 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186>
4724 %strided.vec1 = shufflevector <192 x i32> %wide.vec, <192 x i32> poison, <32 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187>
4725 %strided.vec2 = shufflevector <192 x i32> %wide.vec, <192 x i32> poison, <32 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92, i32 98, i32 104, i32 110, i32 116, i32 122, i32 128, i32 134, i32 140, i32 146, i32 152, i32 158, i32 164, i32 170, i32 176, i32 182, i32 188>
4726 %strided.vec3 = shufflevector <192 x i32> %wide.vec, <192 x i32> poison, <32 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93, i32 99, i32 105, i32 111, i32 117, i32 123, i32 129, i32 135, i32 141, i32 147, i32 153, i32 159, i32 165, i32 171, i32 177, i32 183, i32 189>
4727 %strided.vec4 = shufflevector <192 x i32> %wide.vec, <192 x i32> poison, <32 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94, i32 100, i32 106, i32 112, i32 118, i32 124, i32 130, i32 136, i32 142, i32 148, i32 154, i32 160, i32 166, i32 172, i32 178, i32 184, i32 190>
4728 %strided.vec5 = shufflevector <192 x i32> %wide.vec, <192 x i32> poison, <32 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95, i32 101, i32 107, i32 113, i32 119, i32 125, i32 131, i32 137, i32 143, i32 149, i32 155, i32 161, i32 167, i32 173, i32 179, i32 185, i32 191>
4729 store <32 x i32> %strided.vec0, ptr %out.vec0, align 64
4730 store <32 x i32> %strided.vec1, ptr %out.vec1, align 64
4731 store <32 x i32> %strided.vec2, ptr %out.vec2, align 64
4732 store <32 x i32> %strided.vec3, ptr %out.vec3, align 64
4733 store <32 x i32> %strided.vec4, ptr %out.vec4, align 64
4734 store <32 x i32> %strided.vec5, ptr %out.vec5, align 64
4735 ret void
4736 }
4738 define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
4739 ; SSE-LABEL: load_i32_stride6_vf64:
4740 ; SSE: # %bb.0:
4741 ; SSE-NEXT: subq $2184, %rsp # imm = 0x888
4742 ; SSE-NEXT: movdqa 912(%rdi), %xmm7
4743 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4744 ; SSE-NEXT: movdqa 928(%rdi), %xmm3
4745 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4746 ; SSE-NEXT: movdqa 864(%rdi), %xmm8
4747 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4748 ; SSE-NEXT: movdqa 880(%rdi), %xmm4
4749 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4750 ; SSE-NEXT: movdqa 528(%rdi), %xmm9
4751 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4752 ; SSE-NEXT: movdqa 544(%rdi), %xmm5
4753 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4754 ; SSE-NEXT: movdqa 480(%rdi), %xmm10
4755 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4756 ; SSE-NEXT: movdqa 496(%rdi), %xmm6
4757 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4758 ; SSE-NEXT: movdqa 144(%rdi), %xmm11
4759 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4760 ; SSE-NEXT: movdqa 160(%rdi), %xmm2
4761 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4762 ; SSE-NEXT: movdqa 96(%rdi), %xmm12
4763 ; SSE-NEXT: movdqa %xmm12, (%rsp) # 16-byte Spill
4764 ; SSE-NEXT: movdqa 112(%rdi), %xmm0
4765 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4766 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4767 ; SSE-NEXT: movdqa %xmm12, %xmm1
4768 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4769 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
4770 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm11[0,0,1,1]
4771 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4772 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4773 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4774 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
4775 ; SSE-NEXT: movdqa %xmm10, %xmm1
4776 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4777 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
4778 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,0,1,1]
4779 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4780 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4781 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4782 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
4783 ; SSE-NEXT: movdqa %xmm8, %xmm1
4784 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4785 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
4786 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,0,1,1]
4787 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4788 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4789 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4790 ; SSE-NEXT: movdqa 1248(%rdi), %xmm2
4791 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4792 ; SSE-NEXT: movdqa 1264(%rdi), %xmm0
4793 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4794 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4795 ; SSE-NEXT: movdqa %xmm2, %xmm1
4796 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4797 ; SSE-NEXT: movdqa 1296(%rdi), %xmm3
4798 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4799 ; SSE-NEXT: movdqa 1312(%rdi), %xmm0
4800 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4801 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4802 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,1,1]
4803 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4804 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
4805 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4806 ; SSE-NEXT: movdqa (%rdi), %xmm1
4807 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4808 ; SSE-NEXT: movdqa 16(%rdi), %xmm0
4809 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4810 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4811 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4812 ; SSE-NEXT: movdqa 64(%rdi), %xmm0
4813 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4814 ; SSE-NEXT: movdqa 48(%rdi), %xmm2
4815 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4816 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4817 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
4818 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4819 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
4820 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4821 ; SSE-NEXT: movdqa 384(%rdi), %xmm1
4822 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4823 ; SSE-NEXT: movdqa 400(%rdi), %xmm0
4824 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4825 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4826 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4827 ; SSE-NEXT: movdqa 432(%rdi), %xmm2
4828 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4829 ; SSE-NEXT: movdqa 448(%rdi), %xmm0
4830 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4831 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4832 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
4833 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4834 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
4835 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4836 ; SSE-NEXT: movdqa 768(%rdi), %xmm1
4837 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4838 ; SSE-NEXT: movdqa 784(%rdi), %xmm0
4839 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4840 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4841 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4842 ; SSE-NEXT: movdqa 816(%rdi), %xmm2
4843 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4844 ; SSE-NEXT: movdqa 832(%rdi), %xmm0
4845 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4846 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4847 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
4848 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4849 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
4850 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4851 ; SSE-NEXT: movdqa 1152(%rdi), %xmm1
4852 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4853 ; SSE-NEXT: movdqa 1168(%rdi), %xmm0
4854 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4855 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4856 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4857 ; SSE-NEXT: movdqa 1200(%rdi), %xmm2
4858 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4859 ; SSE-NEXT: movdqa 1216(%rdi), %xmm0
4860 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4861 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4862 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
4863 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4864 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
4865 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4866 ; SSE-NEXT: movdqa 288(%rdi), %xmm2
4867 ; SSE-NEXT: movdqa 304(%rdi), %xmm0
4868 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4869 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4870 ; SSE-NEXT: movdqa %xmm2, %xmm1
4871 ; SSE-NEXT: movdqa %xmm2, %xmm15
4872 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4873 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4874 ; SSE-NEXT: movdqa 336(%rdi), %xmm7
4875 ; SSE-NEXT: movdqa 352(%rdi), %xmm0
4876 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4877 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4878 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,0,1,1]
4879 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4880 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4881 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
4882 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4883 ; SSE-NEXT: movdqa 672(%rdi), %xmm1
4884 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4885 ; SSE-NEXT: movdqa 688(%rdi), %xmm0
4886 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4887 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4888 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4889 ; SSE-NEXT: movdqa 720(%rdi), %xmm3
4890 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4891 ; SSE-NEXT: movdqa 736(%rdi), %xmm0
4892 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4893 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4894 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
4895 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4896 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
4897 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4898 ; SSE-NEXT: movdqa 1056(%rdi), %xmm2
4899 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4900 ; SSE-NEXT: movdqa 1072(%rdi), %xmm0
4901 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4902 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4903 ; SSE-NEXT: movdqa %xmm2, %xmm1
4904 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4905 ; SSE-NEXT: movdqa 1104(%rdi), %xmm2
4906 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4907 ; SSE-NEXT: movdqa 1120(%rdi), %xmm0
4908 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4909 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4910 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
4911 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4912 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
4913 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4914 ; SSE-NEXT: movdqa 1440(%rdi), %xmm1
4915 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4916 ; SSE-NEXT: movdqa 1456(%rdi), %xmm0
4917 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4918 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4919 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4920 ; SSE-NEXT: movdqa 1488(%rdi), %xmm2
4921 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4922 ; SSE-NEXT: movdqa 1504(%rdi), %xmm0
4923 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4924 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
4925 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
4926 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
4927 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
4928 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4929 ; SSE-NEXT: movdqa 192(%rdi), %xmm5
4930 ; SSE-NEXT: movdqa 208(%rdi), %xmm6
4931 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,3,2,3]
4932 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4933 ; SSE-NEXT: movdqa %xmm5, %xmm3
4934 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4935 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
4936 ; SSE-NEXT: movdqa 240(%rdi), %xmm2
4937 ; SSE-NEXT: movdqa 256(%rdi), %xmm1
4938 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,2,3,3]
4939 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4940 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm2[0,0,1,1]
4941 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4942 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1]
4943 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm3[0],xmm12[1]
4944 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4945 ; SSE-NEXT: movdqa 576(%rdi), %xmm10
4946 ; SSE-NEXT: movdqa 592(%rdi), %xmm14
4947 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm14[2,3,2,3]
4948 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4949 ; SSE-NEXT: movdqa %xmm10, %xmm4
4950 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4951 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
4952 ; SSE-NEXT: movdqa 624(%rdi), %xmm11
4953 ; SSE-NEXT: movdqa 640(%rdi), %xmm13
4954 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[2,2,3,3]
4955 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4956 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,1,1]
4957 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4958 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
4959 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm4[0],xmm12[1]
4960 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4961 ; SSE-NEXT: movdqa 960(%rdi), %xmm4
4962 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4963 ; SSE-NEXT: movdqa 976(%rdi), %xmm0
4964 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4965 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
4966 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
4967 ; SSE-NEXT: movdqa 1008(%rdi), %xmm8
4968 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4969 ; SSE-NEXT: movdqa 1024(%rdi), %xmm0
4970 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
4971 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4972 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm8[0,0,1,1]
4973 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
4974 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm4[0],xmm12[1]
4975 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4976 ; SSE-NEXT: movdqa 1344(%rdi), %xmm9
4977 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4978 ; SSE-NEXT: movdqa 1360(%rdi), %xmm3
4979 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4980 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
4981 ; SSE-NEXT: movdqa %xmm9, %xmm4
4982 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
4983 ; SSE-NEXT: movdqa 1392(%rdi), %xmm9
4984 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4985 ; SSE-NEXT: movdqa 1408(%rdi), %xmm3
4986 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4987 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
4988 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm9[0,0,1,1]
4989 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
4990 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm4[0],xmm12[1]
4991 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4992 ; SSE-NEXT: pshufd $85, (%rsp), %xmm3 # 16-byte Folded Reload
4993 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
4994 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
4995 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
4996 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
4997 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
4998 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
4999 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
5000 ; SSE-NEXT: movdqa %xmm9, %xmm12
5001 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1]
5002 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm3[0],xmm12[1]
5003 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5004 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5005 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5006 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5007 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5008 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5009 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5010 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5011 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5012 ; SSE-NEXT: movdqa %xmm8, %xmm12
5013 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1]
5014 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm3[0],xmm12[1]
5015 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5016 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm15[1,1,1,1]
5017 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5018 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5019 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5020 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5021 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5022 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
5023 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm3[0],xmm7[1]
5024 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5025 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,1,1]
5026 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[3,3,3,3]
5027 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5028 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
5029 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
5030 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
5031 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5032 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5033 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm15[1,1,1,1]
5034 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5035 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5036 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5037 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5038 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5039 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5040 ; SSE-NEXT: movdqa %xmm12, %xmm7
5041 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
5042 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm3[0],xmm7[1]
5043 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5044 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5045 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[1,1,1,1]
5046 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5047 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5048 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5049 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5050 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5051 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5052 ; SSE-NEXT: movdqa %xmm2, %xmm5
5053 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
5054 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm3[0],xmm5[1]
5055 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5056 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5057 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5058 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5059 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5060 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5061 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5062 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5063 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5064 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
5065 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm3[0],xmm5[1]
5066 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5067 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[1,1,1,1]
5068 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm14[3,3,3,3]
5069 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5070 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm13[2,3,2,3]
5071 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
5072 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm3[0],xmm11[1]
5073 ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5074 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5075 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5076 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5077 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5078 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5079 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5080 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5081 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
5082 ; SSE-NEXT: movdqa %xmm11, %xmm1
5083 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
5084 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
5085 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5086 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5087 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,1,1]
5088 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5089 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5090 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5091 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5092 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5093 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5094 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
5095 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
5096 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5097 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5098 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5099 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5100 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5101 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5102 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5103 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5104 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
5105 ; SSE-NEXT: movdqa %xmm13, %xmm1
5106 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
5107 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
5108 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5109 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5110 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5111 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5112 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5113 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5114 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
5115 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5116 ; SSE-NEXT: movdqa %xmm14, %xmm0
5117 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
5118 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5119 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5120 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5121 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5122 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5123 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5124 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5125 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5126 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5127 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5128 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
5129 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5130 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5131 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5132 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[1,1,1,1]
5133 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5134 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5135 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5136 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5137 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5138 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5139 ; SSE-NEXT: movdqa %xmm5, %xmm0
5140 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
5141 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5142 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5143 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5144 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5145 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5146 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5147 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5148 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5149 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5150 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5151 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
5152 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5153 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5154 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5155 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5156 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5157 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
5158 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5159 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5160 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
5161 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5162 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
5163 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5164 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5165 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,2,3,3]
5166 ; SSE-NEXT: movdqa 80(%rdi), %xmm1
5167 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5168 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5169 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5170 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5171 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5172 ; SSE-NEXT: movdqa 32(%rdi), %xmm1
5173 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5174 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5175 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5176 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5177 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,3,3]
5178 ; SSE-NEXT: movdqa 176(%rdi), %xmm1
5179 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5180 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5181 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5182 ; SSE-NEXT: pshufd $238, (%rsp), %xmm3 # 16-byte Folded Reload
5183 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5184 ; SSE-NEXT: movdqa 128(%rdi), %xmm1
5185 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5186 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5187 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5188 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5189 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5190 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5191 ; SSE-NEXT: movdqa 272(%rdi), %xmm1
5192 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5193 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5194 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5195 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5196 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5197 ; SSE-NEXT: movdqa 224(%rdi), %xmm1
5198 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5199 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5200 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5201 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5202 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5203 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5204 ; SSE-NEXT: movdqa 368(%rdi), %xmm1
5205 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5206 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5207 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5208 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5209 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5210 ; SSE-NEXT: movdqa 320(%rdi), %xmm1
5211 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5212 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5213 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5214 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5215 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
5216 ; SSE-NEXT: movdqa 464(%rdi), %xmm1
5217 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5218 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5219 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5220 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
5221 ; SSE-NEXT: movdqa 416(%rdi), %xmm1
5222 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5223 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5224 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5225 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5226 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,3,3]
5227 ; SSE-NEXT: movdqa 560(%rdi), %xmm1
5228 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5229 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5230 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5231 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm15[2,3,2,3]
5232 ; SSE-NEXT: movdqa 512(%rdi), %xmm2
5233 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5234 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
5235 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5236 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5237 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5238 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5239 ; SSE-NEXT: movdqa 656(%rdi), %xmm1
5240 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5241 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5242 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5243 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5244 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5245 ; SSE-NEXT: movdqa 608(%rdi), %xmm9
5246 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1]
5247 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5248 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5249 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5250 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5251 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5252 ; SSE-NEXT: movdqa 752(%rdi), %xmm1
5253 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5254 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5255 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5256 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5257 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[2,3,2,3]
5258 ; SSE-NEXT: movdqa 704(%rdi), %xmm2
5259 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5260 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
5261 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5262 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5263 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5264 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5265 ; SSE-NEXT: movdqa 848(%rdi), %xmm1
5266 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5267 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5268 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5269 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
5270 ; SSE-NEXT: movdqa 800(%rdi), %xmm2
5271 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5272 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
5273 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5274 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5275 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,3,3]
5276 ; SSE-NEXT: movdqa 944(%rdi), %xmm1
5277 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5278 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5279 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5280 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5281 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5282 ; SSE-NEXT: movdqa 896(%rdi), %xmm2
5283 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
5284 ; SSE-NEXT: movdqa %xmm2, %xmm8
5285 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5286 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5287 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5288 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
5289 ; SSE-NEXT: movdqa 1040(%rdi), %xmm1
5290 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5291 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5292 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5293 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5294 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
5295 ; SSE-NEXT: movdqa 992(%rdi), %xmm1
5296 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5297 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5298 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5299 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5300 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
5301 ; SSE-NEXT: movdqa 1136(%rdi), %xmm1
5302 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5303 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5304 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5305 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5306 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5307 ; SSE-NEXT: movdqa 1088(%rdi), %xmm6
5308 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
5309 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5310 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5311 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5312 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
5313 ; SSE-NEXT: movdqa 1232(%rdi), %xmm2
5314 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5315 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
5316 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5317 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[2,3,2,3]
5318 ; SSE-NEXT: movdqa 1184(%rdi), %xmm1
5319 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5320 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5321 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5322 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5323 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5324 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5325 ; SSE-NEXT: movdqa 1328(%rdi), %xmm1
5326 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5327 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5328 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5329 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5330 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5331 ; SSE-NEXT: movdqa 1280(%rdi), %xmm1
5332 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5333 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5334 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5335 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5336 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5337 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5338 ; SSE-NEXT: movdqa 1424(%rdi), %xmm1
5339 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5340 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5341 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5342 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5343 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5344 ; SSE-NEXT: movdqa 1376(%rdi), %xmm1
5345 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5346 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5347 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5348 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5349 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5350 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5351 ; SSE-NEXT: movdqa 1520(%rdi), %xmm1
5352 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5353 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
5354 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
5355 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5356 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
5357 ; SSE-NEXT: movdqa 1472(%rdi), %xmm1
5358 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5359 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5360 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5361 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5362 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5363 ; SSE-NEXT: # xmm3 = mem[3,3,3,3]
5364 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5365 ; SSE-NEXT: # xmm4 = mem[1,1,1,1]
5366 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
5367 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5368 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5369 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5370 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
5371 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
5372 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5373 ; SSE-NEXT: pshufd $255, (%rsp), %xmm2 # 16-byte Folded Reload
5374 ; SSE-NEXT: # xmm2 = mem[3,3,3,3]
5375 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5376 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5377 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
5378 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5379 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5380 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5381 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
5382 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
5383 ; SSE-NEXT: movapd %xmm0, (%rsp) # 16-byte Spill
5384 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5385 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5386 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5387 ; SSE-NEXT: # xmm2 = mem[1,1,1,1]
5388 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
5389 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5390 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5391 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5392 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
5393 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5394 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5395 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5396 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5397 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5398 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
5399 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5400 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5401 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5402 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5403 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
5404 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5405 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5406 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5407 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5408 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5409 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
5410 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5411 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5412 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5413 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5414 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1]
5415 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5416 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5417 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5418 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5419 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5420 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
5421 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5422 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5423 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5424 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5425 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
5426 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5427 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5428 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5429 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5430 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[1,1,1,1]
5431 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5432 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5433 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5434 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5435 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1]
5436 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5437 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5438 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[3,3,3,3]
5439 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5440 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
5441 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5442 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5443 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5444 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
5445 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1]
5446 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5447 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5448 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5449 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5450 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5451 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
5452 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5453 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5454 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5455 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
5456 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
5457 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5458 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5459 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5460 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5461 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,1,1]
5462 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5463 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5464 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5465 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
5466 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
5467 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5468 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5469 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,3,3,3]
5470 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5471 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[1,1,1,1]
5472 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5473 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5474 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5475 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5476 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
5477 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5478 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5479 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5480 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5481 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,1,1]
5482 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5483 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5484 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5485 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5486 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
5487 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5488 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5489 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[3,3,3,3]
5490 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5491 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[1,1,1,1]
5492 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5493 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5494 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5495 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5496 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
5497 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5498 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5499 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5500 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5501 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5502 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
5503 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5504 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5505 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5506 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5507 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
5508 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5509 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5510 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5511 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5512 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5513 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,1,1]
5514 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5515 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5516 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5517 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5518 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
5519 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5520 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5521 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5522 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
5523 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5524 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
5525 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5526 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5527 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5528 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
5529 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
5530 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
5531 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5532 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5533 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5534 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5535 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5536 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5537 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5538 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5539 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
5540 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
5541 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
5542 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5543 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5544 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5545 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5546 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5547 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5548 ; SSE-NEXT: # xmm3 = mem[2,2,3,3]
5549 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5550 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
5551 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
5552 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
5553 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5554 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5555 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5556 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5557 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5558 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5559 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5560 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5561 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
5562 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
5563 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
5564 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5565 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5566 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5567 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5568 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5569 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5570 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5571 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5572 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
5573 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
5574 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
5575 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5576 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5577 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5578 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5579 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5580 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,2,3,3]
5581 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
5582 ; SSE-NEXT: # xmm15 = mem[0,0,1,1]
5583 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
5584 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm1[0],xmm15[1]
5585 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5586 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5587 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5588 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5589 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5590 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5591 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5592 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5593 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
5594 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
5595 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
5596 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5597 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5598 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5599 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5600 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5601 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
5602 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5603 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
5604 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
5605 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
5606 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5607 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5608 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5609 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5610 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5611 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
5612 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5613 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
5614 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
5615 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
5616 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5617 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5618 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5619 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5620 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5621 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,3,3]
5622 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
5623 ; SSE-NEXT: # xmm11 = mem[0,0,1,1]
5624 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
5625 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm1[0],xmm11[1]
5626 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5627 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
5628 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5629 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5630 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,3,3]
5631 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
5632 ; SSE-NEXT: # xmm9 = mem[0,0,1,1]
5633 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
5634 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm1[0],xmm9[1]
5635 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,3,2,3]
5636 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5637 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5638 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,2,3,3]
5639 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
5640 ; SSE-NEXT: # xmm8 = mem[0,0,1,1]
5641 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
5642 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
5643 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5644 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,3,2,3]
5645 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5646 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5647 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,3,3]
5648 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
5649 ; SSE-NEXT: # xmm13 = mem[0,0,1,1]
5650 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
5651 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm1[0],xmm13[1]
5652 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,2,3]
5653 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5654 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5655 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5656 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
5657 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
5658 ; SSE-NEXT: # xmm7 = mem[0,0,1,1]
5659 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
5660 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1]
5661 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5662 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,2,3]
5663 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5664 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5665 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
5666 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
5667 ; SSE-NEXT: # xmm5 = mem[0,0,1,1]
5668 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
5669 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1]
5670 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,3,2,3]
5671 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5672 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5673 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5674 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
5675 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5676 ; SSE-NEXT: # xmm3 = mem[0,0,1,1]
5677 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5678 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
5679 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5680 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
5681 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5682 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5683 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,3,3]
5684 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5685 ; SSE-NEXT: # xmm2 = mem[0,0,1,1]
5686 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
5687 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
5688 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5689 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5690 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5691 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5692 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5693 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5694 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5695 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5696 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
5697 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
5698 ; SSE-NEXT: movapd %xmm15, %xmm4
5699 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5700 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5701 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5702 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5703 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5704 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5705 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5706 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5707 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
5708 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
5709 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5710 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5711 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5712 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5713 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5714 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5715 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5716 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5717 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5718 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
5719 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
5720 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5721 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5722 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5723 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5724 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5725 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5726 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5727 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5728 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5729 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
5730 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
5731 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5732 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5733 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5734 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5735 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5736 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5737 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5738 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5739 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5740 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
5741 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
5742 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5743 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5744 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5745 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5746 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5747 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5748 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5749 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5750 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5751 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
5752 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
5753 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5754 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5755 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5756 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5757 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5758 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5759 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5760 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5761 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5762 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
5763 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
5764 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5765 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5766 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5767 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5768 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5769 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5770 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5771 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5772 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5773 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
5774 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
5775 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5776 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5777 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5778 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5779 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5780 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5781 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5782 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5783 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5784 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
5785 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
5786 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5787 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5788 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5789 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5790 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5791 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5792 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5793 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5794 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5795 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
5796 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
5797 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5798 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5799 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5800 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5801 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5802 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5803 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5804 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5805 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5806 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
5807 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
5808 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5809 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5810 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5811 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[3,3,3,3]
5812 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5813 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5814 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5815 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5816 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm1[0],xmm12[1],xmm1[1]
5817 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm0[0],xmm12[1]
5818 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5819 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5820 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5821 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5822 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5823 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5824 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5825 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5826 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5827 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm1[0],xmm12[1],xmm1[1]
5828 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm0[0],xmm12[1]
5829 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5830 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5831 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5832 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[3,3,3,3]
5833 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5834 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5835 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5836 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5837 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
5838 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm0[0],xmm15[1]
5839 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5840 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5841 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5842 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
5843 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5844 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5845 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5846 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5847 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm1[0],xmm12[1],xmm1[1]
5848 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm0[0],xmm12[1]
5849 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5850 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
5851 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[3,3,3,3]
5852 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
5853 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5854 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
5855 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5856 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
5857 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
5858 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5859 ; SSE-NEXT: movaps %xmm0, 224(%rsi)
5860 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5861 ; SSE-NEXT: movaps %xmm0, 160(%rsi)
5862 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5863 ; SSE-NEXT: movaps %xmm0, 96(%rsi)
5864 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5865 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
5866 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5867 ; SSE-NEXT: movaps %xmm0, 240(%rsi)
5868 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5869 ; SSE-NEXT: movaps %xmm0, 176(%rsi)
5870 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5871 ; SSE-NEXT: movaps %xmm0, 112(%rsi)
5872 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5873 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
5874 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5875 ; SSE-NEXT: movaps %xmm0, 192(%rsi)
5876 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5877 ; SSE-NEXT: movaps %xmm0, 128(%rsi)
5878 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5879 ; SSE-NEXT: movaps %xmm0, 64(%rsi)
5880 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5881 ; SSE-NEXT: movaps %xmm0, (%rsi)
5882 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5883 ; SSE-NEXT: movaps %xmm0, 208(%rsi)
5884 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5885 ; SSE-NEXT: movaps %xmm0, 144(%rsi)
5886 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5887 ; SSE-NEXT: movaps %xmm0, 80(%rsi)
5888 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5889 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
5890 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5891 ; SSE-NEXT: movaps %xmm0, 224(%rdx)
5892 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5893 ; SSE-NEXT: movaps %xmm0, 240(%rdx)
5894 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5895 ; SSE-NEXT: movaps %xmm0, 192(%rdx)
5896 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5897 ; SSE-NEXT: movaps %xmm0, 208(%rdx)
5898 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5899 ; SSE-NEXT: movaps %xmm0, 160(%rdx)
5900 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5901 ; SSE-NEXT: movaps %xmm0, 176(%rdx)
5902 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5903 ; SSE-NEXT: movaps %xmm0, 128(%rdx)
5904 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5905 ; SSE-NEXT: movaps %xmm0, 144(%rdx)
5906 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5907 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
5908 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5909 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
5910 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5911 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
5912 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5913 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
5914 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5915 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
5916 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5917 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
5918 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5919 ; SSE-NEXT: movaps %xmm0, (%rdx)
5920 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5921 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
5922 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5923 ; SSE-NEXT: movaps %xmm0, 240(%rcx)
5924 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5925 ; SSE-NEXT: movaps %xmm0, 224(%rcx)
5926 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5927 ; SSE-NEXT: movaps %xmm0, 208(%rcx)
5928 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5929 ; SSE-NEXT: movaps %xmm0, 192(%rcx)
5930 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5931 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
5932 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5933 ; SSE-NEXT: movaps %xmm0, 160(%rcx)
5934 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5935 ; SSE-NEXT: movaps %xmm0, 144(%rcx)
5936 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5937 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
5938 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5939 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
5940 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5941 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
5942 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5943 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
5944 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5945 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
5946 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5947 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
5948 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5949 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
5950 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5951 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
5952 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5953 ; SSE-NEXT: movaps %xmm0, (%rcx)
5954 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5955 ; SSE-NEXT: movaps %xmm0, 240(%r8)
5956 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5957 ; SSE-NEXT: movaps %xmm0, 224(%r8)
5958 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5959 ; SSE-NEXT: movaps %xmm0, 208(%r8)
5960 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5961 ; SSE-NEXT: movaps %xmm0, 192(%r8)
5962 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5963 ; SSE-NEXT: movaps %xmm0, 176(%r8)
5964 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5965 ; SSE-NEXT: movaps %xmm0, 160(%r8)
5966 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5967 ; SSE-NEXT: movaps %xmm0, 144(%r8)
5968 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5969 ; SSE-NEXT: movaps %xmm0, 128(%r8)
5970 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5971 ; SSE-NEXT: movaps %xmm0, 112(%r8)
5972 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5973 ; SSE-NEXT: movaps %xmm0, 96(%r8)
5974 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5975 ; SSE-NEXT: movaps %xmm0, 80(%r8)
5976 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5977 ; SSE-NEXT: movaps %xmm0, 64(%r8)
5978 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5979 ; SSE-NEXT: movaps %xmm0, 48(%r8)
5980 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5981 ; SSE-NEXT: movaps %xmm0, 32(%r8)
5982 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
5983 ; SSE-NEXT: movaps %xmm0, 16(%r8)
5984 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5985 ; SSE-NEXT: movaps %xmm0, (%r8)
5986 ; SSE-NEXT: movapd %xmm2, 240(%r9)
5987 ; SSE-NEXT: movapd %xmm3, 224(%r9)
5988 ; SSE-NEXT: movapd %xmm5, 208(%r9)
5989 ; SSE-NEXT: movapd %xmm7, 192(%r9)
5990 ; SSE-NEXT: movapd %xmm13, 176(%r9)
5991 ; SSE-NEXT: movapd %xmm8, 160(%r9)
5992 ; SSE-NEXT: movapd %xmm9, 144(%r9)
5993 ; SSE-NEXT: movapd %xmm11, 128(%r9)
5994 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5995 ; SSE-NEXT: movaps %xmm0, 112(%r9)
5996 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5997 ; SSE-NEXT: movaps %xmm0, 96(%r9)
5998 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5999 ; SSE-NEXT: movaps %xmm0, 80(%r9)
6000 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6001 ; SSE-NEXT: movaps %xmm0, 64(%r9)
6002 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6003 ; SSE-NEXT: movaps %xmm0, 48(%r9)
6004 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6005 ; SSE-NEXT: movaps %xmm0, 32(%r9)
6006 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6007 ; SSE-NEXT: movaps %xmm0, 16(%r9)
6008 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6009 ; SSE-NEXT: movaps %xmm0, (%r9)
6010 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
6011 ; SSE-NEXT: movapd %xmm14, 240(%rax)
6012 ; SSE-NEXT: movapd %xmm12, 224(%rax)
6013 ; SSE-NEXT: movapd %xmm15, 208(%rax)
6014 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6015 ; SSE-NEXT: movaps %xmm0, 192(%rax)
6016 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6017 ; SSE-NEXT: movaps %xmm0, 176(%rax)
6018 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6019 ; SSE-NEXT: movaps %xmm0, 160(%rax)
6020 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6021 ; SSE-NEXT: movaps %xmm0, 144(%rax)
6022 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6023 ; SSE-NEXT: movaps %xmm0, 128(%rax)
6024 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6025 ; SSE-NEXT: movaps %xmm0, 112(%rax)
6026 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6027 ; SSE-NEXT: movaps %xmm0, 96(%rax)
6028 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6029 ; SSE-NEXT: movaps %xmm0, 80(%rax)
6030 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6031 ; SSE-NEXT: movaps %xmm0, 64(%rax)
6032 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6033 ; SSE-NEXT: movaps %xmm0, 48(%rax)
6034 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6035 ; SSE-NEXT: movaps %xmm0, 32(%rax)
6036 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6037 ; SSE-NEXT: movaps %xmm0, 16(%rax)
6038 ; SSE-NEXT: movapd %xmm4, (%rax)
6039 ; SSE-NEXT: addq $2184, %rsp # imm = 0x888
6040 ; SSE-NEXT: retq
6041 ;
6042 ; AVX1-ONLY-LABEL: load_i32_stride6_vf64:
6043 ; AVX1-ONLY: # %bb.0:
6044 ; AVX1-ONLY-NEXT: subq $2584, %rsp # imm = 0xA18
6045 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %ymm6
6046 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6047 ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %ymm2
6048 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6049 ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %ymm3
6050 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6051 ; AVX1-ONLY-NEXT: vmovapd 352(%rdi), %ymm4
6052 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6053 ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %ymm5
6054 ; AVX1-ONLY-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6055 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm7
6056 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6057 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm8
6058 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6059 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %ymm1
6060 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6061 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %ymm0
6062 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6063 ; AVX1-ONLY-NEXT: vinsertf128 $1, 288(%rdi), %ymm0, %ymm9
6064 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6065 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm9[0,0],ymm1[6,4],ymm9[4,4]
6066 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm9[2,2],ymm0[6,4],ymm9[6,6]
6067 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7]
6068 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6069 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm9
6070 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm9[2,3]
6071 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm9[0,3]
6072 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6073 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm5[2,3],ymm4[0,1]
6074 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6075 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm5[0],ymm1[3],ymm5[2]
6076 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
6077 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
6078 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6079 ; AVX1-ONLY-NEXT: vinsertf128 $1, 672(%rdi), %ymm3, %ymm1
6080 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6081 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm1[0,0],ymm2[6,4],ymm1[4,4]
6082 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,2],ymm0[6,4],ymm1[6,6]
6083 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %ymm1
6084 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6085 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5],ymm1[6,7]
6086 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6087 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm2
6088 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6089 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
6090 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,3]
6091 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6092 ; AVX1-ONLY-NEXT: vmovapd 736(%rdi), %ymm1
6093 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6094 ; AVX1-ONLY-NEXT: vmovapd 704(%rdi), %ymm2
6095 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6096 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
6097 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6098 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
6099 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
6100 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
6101 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6102 ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %ymm1
6103 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6104 ; AVX1-ONLY-NEXT: vmovaps 1024(%rdi), %ymm0
6105 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6106 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1056(%rdi), %ymm0, %ymm2
6107 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6108 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm2[0,0],ymm1[6,4],ymm2[4,4]
6109 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,2],ymm0[6,4],ymm2[6,6]
6110 ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %ymm1
6111 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6112 ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %ymm2
6113 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6114 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
6115 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6116 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm13
6117 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3]
6118 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm13[0,3]
6119 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6120 ; AVX1-ONLY-NEXT: vmovapd 1120(%rdi), %ymm1
6121 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6122 ; AVX1-ONLY-NEXT: vmovapd 1088(%rdi), %ymm2
6123 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6124 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
6125 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6126 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
6127 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
6128 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
6129 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6130 ; AVX1-ONLY-NEXT: vmovaps 1440(%rdi), %ymm1
6131 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6132 ; AVX1-ONLY-NEXT: vmovaps 1408(%rdi), %ymm0
6133 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6134 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1440(%rdi), %ymm0, %ymm11
6135 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm11[0,0],ymm1[6,4],ymm11[4,4]
6136 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm11[2,2],ymm0[6,4],ymm11[6,6]
6137 ; AVX1-ONLY-NEXT: vmovaps 1376(%rdi), %ymm1
6138 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6139 ; AVX1-ONLY-NEXT: vmovaps 1344(%rdi), %ymm2
6140 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6141 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
6142 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6143 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm10
6144 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm10[2,3]
6145 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm10[0,3]
6146 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6147 ; AVX1-ONLY-NEXT: vmovapd 1504(%rdi), %ymm1
6148 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6149 ; AVX1-ONLY-NEXT: vmovapd 1472(%rdi), %ymm2
6150 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6151 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
6152 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6153 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
6154 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
6155 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
6156 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6157 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm1
6158 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6159 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm0
6160 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6161 ; AVX1-ONLY-NEXT: vinsertf128 $1, 96(%rdi), %ymm0, %ymm8
6162 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm8[0,0],ymm1[6,4],ymm8[4,4]
6163 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm8[2,2],ymm0[6,4],ymm8[6,6]
6164 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
6165 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6166 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm2
6167 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6168 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
6169 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6170 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm7
6171 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3]
6172 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[0,3]
6173 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6174 ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %ymm1
6175 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6176 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm2
6177 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6178 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
6179 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6180 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
6181 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
6182 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
6183 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6184 ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm1
6185 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6186 ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm0
6187 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6188 ; AVX1-ONLY-NEXT: vinsertf128 $1, 480(%rdi), %ymm0, %ymm6
6189 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm6[0,0],ymm1[6,4],ymm6[4,4]
6190 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm6[2,2],ymm0[6,4],ymm6[6,6]
6191 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm1
6192 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6193 ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm2
6194 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6195 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
6196 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6197 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm4
6198 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3]
6199 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,3]
6200 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6201 ; AVX1-ONLY-NEXT: vmovapd 544(%rdi), %ymm1
6202 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6203 ; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %ymm2
6204 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6205 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
6206 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6207 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
6208 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
6209 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
6210 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6211 ; AVX1-ONLY-NEXT: vmovaps 864(%rdi), %ymm1
6212 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6213 ; AVX1-ONLY-NEXT: vmovaps 832(%rdi), %ymm0
6214 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6215 ; AVX1-ONLY-NEXT: vinsertf128 $1, 864(%rdi), %ymm0, %ymm3
6216 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm3[0,0],ymm1[6,4],ymm3[4,4]
6217 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,2],ymm0[6,4],ymm3[6,6]
6218 ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %ymm1
6219 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6220 ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %ymm2
6221 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6222 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
6223 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6224 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm2
6225 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
6226 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,3]
6227 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6228 ; AVX1-ONLY-NEXT: vmovapd 928(%rdi), %ymm1
6229 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6230 ; AVX1-ONLY-NEXT: vmovapd 896(%rdi), %ymm5
6231 ; AVX1-ONLY-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6232 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm5[2,3],ymm1[0,1]
6233 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6234 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm5[0],ymm1[3],ymm5[2]
6235 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
6236 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
6237 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6238 ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %ymm5
6239 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6240 ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %ymm0
6241 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6242 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1248(%rdi), %ymm0, %ymm1
6243 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6244 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm5[2,0],ymm1[0,0],ymm5[6,4],ymm1[4,4]
6245 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm0[2,0],ymm1[2,2],ymm0[6,4],ymm1[6,6]
6246 ; AVX1-ONLY-NEXT: vmovaps 1184(%rdi), %ymm0
6247 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6248 ; AVX1-ONLY-NEXT: vmovaps 1152(%rdi), %ymm5
6249 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6250 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5],ymm5[6,7]
6251 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6252 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm5
6253 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm14 = xmm0[0,1],xmm5[2,3]
6254 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm14 = xmm14[0,2],xmm5[0,3]
6255 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3,4,5,6,7]
6256 ; AVX1-ONLY-NEXT: vmovapd 1312(%rdi), %ymm12
6257 ; AVX1-ONLY-NEXT: vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6258 ; AVX1-ONLY-NEXT: vmovapd 1280(%rdi), %ymm0
6259 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6260 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm12[0,1]
6261 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6262 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm15 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
6263 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
6264 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
6265 ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6266 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6267 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
6268 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm12[3,0],ymm0[1,0],ymm12[7,4],ymm0[5,4]
6269 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm0[2,3],ymm14[6,4],ymm0[6,7]
6270 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6271 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm15 = xmm0[1,0],xmm9[3,0]
6272 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,2],xmm9[1,3]
6273 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3,4,5,6,7]
6274 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
6275 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6276 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm0[3,1],ymm12[1,3],ymm0[7,5],ymm12[5,7]
6277 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
6278 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm14[0,1,2,3,4,5],ymm15[6,7]
6279 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6280 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
6281 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6282 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm9[3,0],ymm0[1,0],ymm9[7,4],ymm0[5,4]
6283 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm0[2,3],ymm14[6,4],ymm0[6,7]
6284 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6285 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6286 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm15 = xmm0[1,0],xmm1[3,0]
6287 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,2],xmm1[1,3]
6288 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3,4,5,6,7]
6289 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6290 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
6291 ; AVX1-ONLY-NEXT: # ymm15 = ymm0[3,1],mem[1,3],ymm0[7,5],mem[5,7]
6292 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
6293 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
6294 ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6295 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6296 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6297 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm0[3,0],ymm1[1,0],ymm0[7,4],ymm1[5,4]
6298 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm1[2,3],ymm14[6,4],ymm1[6,7]
6299 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6300 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm15 = xmm0[1,0],xmm13[3,0]
6301 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm15[0,2],xmm13[1,3]
6302 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
6303 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
6304 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6305 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm0[3,1],ymm15[1,3],ymm0[7,5],ymm15[5,7]
6306 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
6307 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
6308 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6309 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
6310 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm14[3,0],ymm11[1,0],ymm14[7,4],ymm11[5,4]
6311 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm13[2,0],ymm11[2,3],ymm13[6,4],ymm11[6,7]
6312 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6313 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm0[1,0],xmm10[3,0]
6314 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm10 = xmm13[0,2],xmm10[1,3]
6315 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3,4,5,6,7]
6316 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
6317 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6318 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm0[3,1],ymm13[1,3],ymm0[7,5],ymm13[5,7]
6319 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
6320 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
6321 ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6322 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
6323 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm11[3,0],ymm8[1,0],ymm11[7,4],ymm8[5,4]
6324 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm10[2,0],ymm8[2,3],ymm10[6,4],ymm8[6,7]
6325 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6326 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm10 = xmm0[1,0],xmm7[3,0]
6327 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,2],xmm7[1,3]
6328 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5,6,7]
6329 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6330 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
6331 ; AVX1-ONLY-NEXT: # ymm8 = ymm0[3,1],mem[1,3],ymm0[7,5],mem[5,7]
6332 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
6333 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
6334 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6335 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
6336 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm8[3,0],ymm6[1,0],ymm8[7,4],ymm6[5,4]
6337 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
6338 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6339 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm0[1,0],xmm4[3,0]
6340 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm7[0,2],xmm4[1,3]
6341 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5,6,7]
6342 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
6343 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6344 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm0[3,1],ymm7[1,3],ymm0[7,5],ymm7[5,7]
6345 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,1,2,0,4,5,6,4]
6346 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
6347 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6348 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6349 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm6[3,0],ymm3[1,0],ymm6[7,4],ymm3[5,4]
6350 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[2,0],ymm3[2,3],ymm4[6,4],ymm3[6,7]
6351 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6352 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm0[1,0],xmm2[3,0]
6353 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm4[0,2],xmm2[1,3]
6354 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
6355 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
6356 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6357 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm0[3,1],ymm4[1,3],ymm0[7,5],ymm4[5,7]
6358 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
6359 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
6360 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6361 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
6362 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6363 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm3[3,0],ymm0[1,0],ymm3[7,4],ymm0[5,4]
6364 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
6365 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6366 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,0],xmm5[3,0]
6367 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm5[1,3]
6368 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
6369 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
6370 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload
6371 ; AVX1-ONLY-NEXT: # ymm1 = ymm2[3,1],mem[1,3],ymm2[7,5],mem[5,7]
6372 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
6373 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
6374 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6375 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6376 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
6377 ; AVX1-ONLY-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
6378 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6379 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6380 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6381 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[2,1],mem[2,0],ymm0[6,5],mem[6,4]
6382 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
6383 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm1
6384 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6385 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
6386 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6387 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
6388 ; AVX1-ONLY-NEXT: # ymm12 = ymm12[0,1,2,3],mem[4,5],ymm12[6,7]
6389 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm12[2,3,0,1]
6390 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6391 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm12[2,0],ymm1[4,4],ymm12[6,4]
6392 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6393 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6394 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6395 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
6396 ; AVX1-ONLY-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
6397 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6398 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm0 # 32-byte Folded Reload
6399 ; AVX1-ONLY-NEXT: # ymm0 = ymm9[2,1],mem[2,0],ymm9[6,5],mem[6,4]
6400 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
6401 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm1
6402 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6403 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
6404 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6405 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6406 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
6407 ; AVX1-ONLY-NEXT: # ymm2 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
6408 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6409 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
6410 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6411 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm2[2,0],ymm1[4,4],ymm2[6,4]
6412 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6413 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6414 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6415 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
6416 ; AVX1-ONLY-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
6417 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6418 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6419 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6420 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[2,1],mem[2,0],ymm0[6,5],mem[6,4]
6421 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
6422 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm1
6423 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6424 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
6425 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6426 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
6427 ; AVX1-ONLY-NEXT: # ymm2 = ymm15[0,1,2,3],mem[4,5],ymm15[6,7]
6428 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6429 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
6430 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6431 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm2[2,0],ymm1[4,4],ymm2[6,4]
6432 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6433 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6434 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6435 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
6436 ; AVX1-ONLY-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
6437 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6438 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
6439 ; AVX1-ONLY-NEXT: # ymm0 = ymm14[2,1],mem[2,0],ymm14[6,5],mem[6,4]
6440 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
6441 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm1
6442 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6443 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
6444 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6445 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm10 # 32-byte Folded Reload
6446 ; AVX1-ONLY-NEXT: # ymm10 = ymm13[0,1,2,3],mem[4,5],ymm13[6,7]
6447 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm10[2,3,0,1]
6448 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6449 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm10[2,0],ymm1[4,4],ymm10[6,4]
6450 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6451 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6452 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6453 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
6454 ; AVX1-ONLY-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
6455 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6456 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload
6457 ; AVX1-ONLY-NEXT: # ymm0 = ymm11[2,1],mem[2,0],ymm11[6,5],mem[6,4]
6458 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
6459 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm1
6460 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6461 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
6462 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6463 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6464 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
6465 ; AVX1-ONLY-NEXT: # ymm2 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
6466 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6467 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
6468 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6469 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm2[2,0],ymm1[4,4],ymm2[6,4]
6470 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6471 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6472 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6473 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
6474 ; AVX1-ONLY-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
6475 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6476 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
6477 ; AVX1-ONLY-NEXT: # ymm0 = ymm8[2,1],mem[2,0],ymm8[6,5],mem[6,4]
6478 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
6479 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm1
6480 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6481 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
6482 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6483 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm9 # 32-byte Folded Reload
6484 ; AVX1-ONLY-NEXT: # ymm9 = ymm7[0,1,2,3],mem[4,5],ymm7[6,7]
6485 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm9[2,3,0,1]
6486 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6487 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm9[2,0],ymm1[4,4],ymm9[6,4]
6488 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6489 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6490 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6491 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
6492 ; AVX1-ONLY-NEXT: # ymm8 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
6493 ; AVX1-ONLY-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
6494 ; AVX1-ONLY-NEXT: # ymm0 = ymm6[2,1],mem[2,0],ymm6[6,5],mem[6,4]
6495 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
6496 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm8, %xmm1
6497 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6498 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm8[2,0],xmm1[2,3]
6499 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6500 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm6 # 32-byte Folded Reload
6501 ; AVX1-ONLY-NEXT: # ymm6 = ymm4[0,1,2,3],mem[4,5],ymm4[6,7]
6502 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm6[2,3,0,1]
6503 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm13[0,0],ymm6[2,0],ymm13[4,4],ymm6[6,4]
6504 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
6505 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6506 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6507 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
6508 ; AVX1-ONLY-NEXT: # ymm5 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
6509 ; AVX1-ONLY-NEXT: vmovaps %ymm3, %ymm4
6510 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
6511 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm4[2,1],ymm3[2,0],ymm4[6,5],ymm3[6,4]
6512 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1]
6513 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm5, %xmm7
6514 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm15 = xmm5[2,0],xmm7[2,3]
6515 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2],ymm0[3,4,5,6,7]
6516 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6517 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
6518 ; AVX1-ONLY-NEXT: # ymm15 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
6519 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm15[2,3,0,1]
6520 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm2[0,0],ymm15[2,0],ymm2[4,4],ymm15[6,4]
6521 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm11[5,6,7]
6522 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6523 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6524 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm12[3,1],ymm0[4,5],ymm12[7,5]
6525 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6526 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm11 # 16-byte Folded Reload
6527 ; AVX1-ONLY-NEXT: # xmm11 = xmm1[3,1],mem[3,3]
6528 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
6529 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6530 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm1[3,1],ymm14[2,1],ymm1[7,5],ymm14[6,5]
6531 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm12[2,3,0,1]
6532 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3,4,5,6,7]
6533 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3,4],ymm0[5,6,7]
6534 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6535 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6536 ; AVX1-ONLY-NEXT: vshufps $116, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6537 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0,1],mem[3,1],ymm0[4,5],mem[7,5]
6538 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6539 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm11 # 16-byte Folded Reload
6540 ; AVX1-ONLY-NEXT: # xmm11 = xmm1[3,1],mem[3,3]
6541 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
6542 ; AVX1-ONLY-NEXT: vshufps $103, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
6543 ; AVX1-ONLY-NEXT: # ymm12 = ymm12[3,1],mem[2,1],ymm12[7,5],mem[6,5]
6544 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm12[2,3,0,1]
6545 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3,4,5,6,7]
6546 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3,4],ymm0[5,6,7]
6547 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6548 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6549 ; AVX1-ONLY-NEXT: vshufps $116, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
6550 ; AVX1-ONLY-NEXT: # ymm1 = ymm0[0,1],mem[3,1],ymm0[4,5],mem[7,5]
6551 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6552 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm11 # 16-byte Folded Reload
6553 ; AVX1-ONLY-NEXT: # xmm11 = xmm0[3,1],mem[3,3]
6554 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6555 ; AVX1-ONLY-NEXT: vshufps $103, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
6556 ; AVX1-ONLY-NEXT: # ymm12 = ymm0[3,1],mem[2,1],ymm0[7,5],mem[6,5]
6557 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm12[2,3,0,1]
6558 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3,4,5,6,7]
6559 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3,4],ymm1[5,6,7]
6560 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6561 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6562 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm10[3,1],ymm0[4,5],ymm10[7,5]
6563 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6564 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm10 # 16-byte Folded Reload
6565 ; AVX1-ONLY-NEXT: # xmm10 = xmm1[3,1],mem[3,3]
6566 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
6567 ; AVX1-ONLY-NEXT: vshufps $103, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
6568 ; AVX1-ONLY-NEXT: # ymm11 = ymm11[3,1],mem[2,1],ymm11[7,5],mem[6,5]
6569 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm11[2,3,0,1]
6570 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3,4,5,6,7]
6571 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm0[5,6,7]
6572 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6573 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm15[3,1],ymm2[4,5],ymm15[7,5]
6574 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[3,1],xmm7[3,3]
6575 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,1],ymm3[2,1],ymm4[7,5],ymm3[6,5]
6576 ; AVX1-ONLY-NEXT: vmovaps %ymm4, %ymm15
6577 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
6578 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
6579 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
6580 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6581 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm13[0,1],ymm6[3,1],ymm13[4,5],ymm6[7,5]
6582 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm1 # 16-byte Folded Reload
6583 ; AVX1-ONLY-NEXT: # xmm1 = xmm8[3,1],mem[3,3]
6584 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
6585 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6586 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm3[3,1],ymm6[2,1],ymm3[7,5],ymm6[6,5]
6587 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
6588 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
6589 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
6590 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6591 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6592 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm9[3,1],ymm0[4,5],ymm9[7,5]
6593 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6594 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
6595 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[3,1],mem[3,3]
6596 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
6597 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
6598 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,1],ymm8[2,1],ymm4[7,5],ymm8[6,5]
6599 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
6600 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
6601 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
6602 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6603 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6604 ; AVX1-ONLY-NEXT: vshufps $116, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6605 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0,1],mem[3,1],ymm0[4,5],mem[7,5]
6606 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6607 ; AVX1-ONLY-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
6608 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[3,1],mem[3,3]
6609 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
6610 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
6611 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm5[3,1],ymm7[2,1],ymm5[7,5],ymm7[6,5]
6612 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
6613 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
6614 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
6615 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6616 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6617 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
6618 ; AVX1-ONLY-NEXT: # ymm2 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
6619 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm0
6620 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6621 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
6622 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
6623 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6624 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
6625 ; AVX1-ONLY-NEXT: vmovapd 80(%rdi), %xmm1
6626 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6627 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm7[0],ymm1[2],ymm7[2]
6628 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,0],ymm5[4,5],ymm1[6,4]
6629 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
6630 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6631 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm2[2,3,0,1]
6632 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm10[0,0],ymm2[6,4],ymm10[4,4]
6633 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,2],ymm1[2,0],ymm10[4,6],ymm1[6,4]
6634 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6635 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6636 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6637 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
6638 ; AVX1-ONLY-NEXT: # ymm2 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
6639 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6640 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm0
6641 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6642 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
6643 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm1
6644 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6645 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
6646 ; AVX1-ONLY-NEXT: vmovapd 272(%rdi), %xmm1
6647 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6648 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm14[0],ymm1[2],ymm14[2]
6649 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
6650 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,0],ymm5[4,5],ymm1[6,4]
6651 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
6652 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm2[2,3,0,1]
6653 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm7[0,0],ymm2[6,4],ymm7[4,4]
6654 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,2],ymm1[2,0],ymm7[4,6],ymm1[6,4]
6655 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6656 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6657 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6658 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
6659 ; AVX1-ONLY-NEXT: # ymm2 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
6660 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6661 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm0
6662 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6663 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
6664 ; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm1
6665 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6666 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
6667 ; AVX1-ONLY-NEXT: vmovapd 464(%rdi), %xmm1
6668 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6669 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm8[0],ymm1[2],ymm8[2]
6670 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,0],ymm4[4,5],ymm1[6,4]
6671 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
6672 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm2[2,3,0,1]
6673 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm5[0,0],ymm2[6,4],ymm5[4,4]
6674 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[0,2],ymm1[2,0],ymm5[4,6],ymm1[6,4]
6675 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6676 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6677 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6678 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
6679 ; AVX1-ONLY-NEXT: # ymm12 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
6680 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm0
6681 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6682 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
6683 ; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm1
6684 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6685 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
6686 ; AVX1-ONLY-NEXT: vmovapd 656(%rdi), %xmm1
6687 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6688 ; AVX1-ONLY-NEXT: vshufpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
6689 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1],mem[0],ymm1[2],mem[2]
6690 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
6691 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
6692 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
6693 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm12[2,3,0,1]
6694 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm12[2,0],ymm4[0,0],ymm12[6,4],ymm4[4,4]
6695 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,2],ymm1[2,0],ymm4[4,6],ymm1[6,4]
6696 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6697 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6698 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6699 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
6700 ; AVX1-ONLY-NEXT: # ymm11 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
6701 ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %xmm0
6702 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6703 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
6704 ; AVX1-ONLY-NEXT: vmovaps 784(%rdi), %xmm1
6705 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6706 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
6707 ; AVX1-ONLY-NEXT: vmovapd 848(%rdi), %xmm1
6708 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6709 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm6[0],ymm1[2],ymm6[2]
6710 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,0],ymm3[4,5],ymm1[6,4]
6711 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
6712 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm11[2,3,0,1]
6713 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm11[2,0],ymm3[0,0],ymm11[6,4],ymm3[4,4]
6714 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,2],ymm1[2,0],ymm3[4,6],ymm1[6,4]
6715 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6716 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6717 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6718 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
6719 ; AVX1-ONLY-NEXT: # ymm14 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
6720 ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %xmm0
6721 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6722 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
6723 ; AVX1-ONLY-NEXT: vmovaps 976(%rdi), %xmm1
6724 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6725 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
6726 ; AVX1-ONLY-NEXT: vmovapd 1040(%rdi), %xmm1
6727 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6728 ; AVX1-ONLY-NEXT: vshufpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
6729 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1],mem[0],ymm1[2],mem[2]
6730 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
6731 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
6732 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
6733 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm14[2,3,0,1]
6734 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm14[2,0],ymm2[0,0],ymm14[6,4],ymm2[4,4]
6735 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm1[2,0],ymm2[4,6],ymm1[6,4]
6736 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
6737 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6738 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6739 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
6740 ; AVX1-ONLY-NEXT: # ymm8 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
6741 ; AVX1-ONLY-NEXT: vmovaps 1184(%rdi), %xmm0
6742 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6743 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
6744 ; AVX1-ONLY-NEXT: vmovaps 1168(%rdi), %xmm1
6745 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6746 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
6747 ; AVX1-ONLY-NEXT: vmovapd 1232(%rdi), %xmm1
6748 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6749 ; AVX1-ONLY-NEXT: vshufpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
6750 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1],mem[0],ymm1[2],mem[2]
6751 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm15[0,1],ymm1[2,0],ymm15[4,5],ymm1[6,4]
6752 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
6753 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm8[2,3,0,1]
6754 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm8[2,0],ymm1[0,0],ymm8[6,4],ymm1[4,4]
6755 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm1[0,2],ymm6[2,0],ymm1[4,6],ymm6[6,4]
6756 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5,6,7]
6757 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6758 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6759 ; AVX1-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
6760 ; AVX1-ONLY-NEXT: # ymm6 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
6761 ; AVX1-ONLY-NEXT: vmovaps 1376(%rdi), %xmm0
6762 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6763 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
6764 ; AVX1-ONLY-NEXT: vmovaps 1360(%rdi), %xmm9
6765 ; AVX1-ONLY-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6766 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm9[0],xmm0[1],xmm9[2,3]
6767 ; AVX1-ONLY-NEXT: vmovapd 1424(%rdi), %xmm9
6768 ; AVX1-ONLY-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6769 ; AVX1-ONLY-NEXT: vshufpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm15 # 32-byte Folded Reload
6770 ; AVX1-ONLY-NEXT: # ymm15 = ymm9[1],mem[0],ymm9[2],mem[2]
6771 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
6772 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm9[0,1],ymm15[2,0],ymm9[4,5],ymm15[6,4]
6773 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0,1],ymm15[2,3,4,5,6,7]
6774 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm6[2,3,0,1]
6775 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm6[2,0],ymm0[0,0],ymm6[6,4],ymm0[4,4]
6776 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm0[0,2],ymm13[2,0],ymm0[4,6],ymm13[6,4]
6777 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm15[0,1,2,3,4],ymm13[5,6,7]
6778 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6779 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
6780 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm9[3,0],ymm10[1,0],ymm9[7,4],ymm10[5,4]
6781 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm10[0,3],ymm13[2,0],ymm10[4,7],ymm13[6,4]
6782 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6783 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm13 # 16-byte Folded Reload
6784 ; AVX1-ONLY-NEXT: # xmm13 = xmm10[0,1],mem[2,3]
6785 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6786 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm15 # 32-byte Folded Reload
6787 ; AVX1-ONLY-NEXT: # ymm15 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
6788 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6789 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm10[1,1],ymm15[2,0],ymm10[5,5],ymm15[6,4]
6790 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm13[1,3,2,3]
6791 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3,4,5,6,7]
6792 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm13[0,1,2,3,4],ymm9[5,6,7]
6793 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6794 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm10[3,0],ymm7[1,0],ymm10[7,4],ymm7[5,4]
6795 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,3],ymm13[2,0],ymm7[4,7],ymm13[6,4]
6796 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6797 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm13 # 16-byte Folded Reload
6798 ; AVX1-ONLY-NEXT: # xmm13 = xmm10[0,1],mem[2,3]
6799 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6800 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm15 # 32-byte Folded Reload
6801 ; AVX1-ONLY-NEXT: # ymm15 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
6802 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6803 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm10[1,1],ymm15[2,0],ymm10[5,5],ymm15[6,4]
6804 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm13[1,3,2,3]
6805 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3,4,5,6,7]
6806 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1,2,3,4],ymm7[5,6,7]
6807 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6808 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm10[3,0],ymm5[1,0],ymm10[7,4],ymm5[5,4]
6809 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,3],ymm13[2,0],ymm5[4,7],ymm13[6,4]
6810 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6811 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm13 # 16-byte Folded Reload
6812 ; AVX1-ONLY-NEXT: # xmm13 = xmm10[0,1],mem[2,3]
6813 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6814 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm15 # 32-byte Folded Reload
6815 ; AVX1-ONLY-NEXT: # ymm15 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
6816 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6817 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm10[1,1],ymm15[2,0],ymm10[5,5],ymm15[6,4]
6818 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm13[1,3,2,3]
6819 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3,4,5,6,7]
6820 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm13[0,1,2,3,4],ymm5[5,6,7]
6821 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[3,0],ymm4[1,0],ymm12[7,4],ymm4[5,4]
6822 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,3],ymm12[2,0],ymm4[4,7],ymm12[6,4]
6823 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6824 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm12 # 16-byte Folded Reload
6825 ; AVX1-ONLY-NEXT: # xmm12 = xmm10[0,1],mem[2,3]
6826 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6827 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm13 # 32-byte Folded Reload
6828 ; AVX1-ONLY-NEXT: # ymm13 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
6829 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6830 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm10[1,1],ymm13[2,0],ymm10[5,5],ymm13[6,4]
6831 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm12[1,3,2,3]
6832 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3,4,5,6,7]
6833 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm12[0,1,2,3,4],ymm4[5,6,7]
6834 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[3,0],ymm3[1,0],ymm11[7,4],ymm3[5,4]
6835 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,3],ymm11[2,0],ymm3[4,7],ymm11[6,4]
6836 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6837 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm11 # 16-byte Folded Reload
6838 ; AVX1-ONLY-NEXT: # xmm11 = xmm10[0,1],mem[2,3]
6839 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6840 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm12 # 32-byte Folded Reload
6841 ; AVX1-ONLY-NEXT: # ymm12 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
6842 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6843 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm10[1,1],ymm12[2,0],ymm10[5,5],ymm12[6,4]
6844 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm11[1,3,2,3]
6845 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2,3,4,5,6,7]
6846 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3,4],ymm3[5,6,7]
6847 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm14[3,0],ymm2[1,0],ymm14[7,4],ymm2[5,4]
6848 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,3],ymm10[2,0],ymm2[4,7],ymm10[6,4]
6849 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6850 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
6851 ; AVX1-ONLY-NEXT: # xmm10 = xmm10[0,1],mem[2,3]
6852 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
6853 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
6854 ; AVX1-ONLY-NEXT: # ymm11 = ymm11[3,1],mem[1,3],ymm11[7,5],mem[5,7]
6855 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
6856 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm12[1,1],ymm11[2,0],ymm12[5,5],ymm11[6,4]
6857 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm10 = xmm10[1,3,2,3]
6858 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3,4,5,6,7]
6859 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1,2,3,4],ymm2[5,6,7]
6860 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[3,0],ymm1[1,0],ymm8[7,4],ymm1[5,4]
6861 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,3],ymm8[2,0],ymm1[4,7],ymm8[6,4]
6862 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
6863 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
6864 ; AVX1-ONLY-NEXT: # xmm8 = xmm8[0,1],mem[2,3]
6865 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6866 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
6867 ; AVX1-ONLY-NEXT: # ymm10 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
6868 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
6869 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm11[1,1],ymm10[2,0],ymm11[5,5],ymm10[6,4]
6870 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm8 = xmm8[1,3,2,3]
6871 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm10[2,3,4,5,6,7]
6872 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3,4],ymm1[5,6,7]
6873 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,0],ymm0[1,0],ymm6[7,4],ymm0[5,4]
6874 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm6[2,0],ymm0[4,7],ymm6[6,4]
6875 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6876 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
6877 ; AVX1-ONLY-NEXT: # xmm6 = xmm6[0,1],mem[2,3]
6878 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
6879 ; AVX1-ONLY-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
6880 ; AVX1-ONLY-NEXT: # ymm8 = ymm8[3,1],mem[1,3],ymm8[7,5],mem[5,7]
6881 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6882 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm10[1,1],ymm8[2,0],ymm10[5,5],ymm8[6,4]
6883 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm6[1,3,2,3]
6884 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3,4,5,6,7]
6885 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm0[5,6,7]
6886 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6887 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 192(%rsi)
6888 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6889 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 128(%rsi)
6890 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6891 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 64(%rsi)
6892 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6893 ; AVX1-ONLY-NEXT: vmovaps %ymm6, (%rsi)
6894 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6895 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 224(%rsi)
6896 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6897 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 160(%rsi)
6898 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6899 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 96(%rsi)
6900 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6901 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 32(%rsi)
6902 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6903 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 192(%rdx)
6904 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6905 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 128(%rdx)
6906 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6907 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 64(%rdx)
6908 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6909 ; AVX1-ONLY-NEXT: vmovaps %ymm6, (%rdx)
6910 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6911 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 224(%rdx)
6912 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6913 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 160(%rdx)
6914 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6915 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 96(%rdx)
6916 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6917 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 32(%rdx)
6918 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6919 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 192(%rcx)
6920 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6921 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 128(%rcx)
6922 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6923 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 64(%rcx)
6924 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6925 ; AVX1-ONLY-NEXT: vmovaps %ymm6, (%rcx)
6926 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6927 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 224(%rcx)
6928 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6929 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 160(%rcx)
6930 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6931 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 96(%rcx)
6932 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6933 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 32(%rcx)
6934 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6935 ; AVX1-ONLY-NEXT: vmovaps %ymm6, (%r8)
6936 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6937 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 64(%r8)
6938 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6939 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 128(%r8)
6940 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6941 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 192(%r8)
6942 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6943 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 224(%r8)
6944 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6945 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 160(%r8)
6946 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6947 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 96(%r8)
6948 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6949 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 32(%r8)
6950 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6951 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 224(%r9)
6952 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6953 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 192(%r9)
6954 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6955 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 160(%r9)
6956 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6957 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 128(%r9)
6958 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6959 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 96(%r9)
6960 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6961 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 64(%r9)
6962 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6963 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 32(%r9)
6964 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6965 ; AVX1-ONLY-NEXT: vmovaps %ymm6, (%r9)
6966 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
6967 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
6968 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%rax)
6969 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 160(%rax)
6970 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 128(%rax)
6971 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rax)
6972 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 64(%rax)
6973 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 32(%rax)
6974 ; AVX1-ONLY-NEXT: vmovaps %ymm9, (%rax)
6975 ; AVX1-ONLY-NEXT: addq $2584, %rsp # imm = 0xA18
6976 ; AVX1-ONLY-NEXT: vzeroupper
6977 ; AVX1-ONLY-NEXT: retq
;
6979 ; AVX2-SLOW-LABEL: load_i32_stride6_vf64:
6980 ; AVX2-SLOW: # %bb.0:
6981 ; AVX2-SLOW-NEXT: subq $2504, %rsp # imm = 0x9C8
6982 ; AVX2-SLOW-NEXT: vmovaps 672(%rdi), %ymm2
6983 ; AVX2-SLOW-NEXT: vmovaps 640(%rdi), %ymm3
6984 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6985 ; AVX2-SLOW-NEXT: vmovaps 608(%rdi), %ymm4
6986 ; AVX2-SLOW-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6987 ; AVX2-SLOW-NEXT: vmovaps 320(%rdi), %ymm5
6988 ; AVX2-SLOW-NEXT: vmovups %ymm5, (%rsp) # 32-byte Spill
6989 ; AVX2-SLOW-NEXT: vmovaps 352(%rdi), %ymm6
6990 ; AVX2-SLOW-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6991 ; AVX2-SLOW-NEXT: vmovaps 288(%rdi), %ymm7
6992 ; AVX2-SLOW-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6993 ; AVX2-SLOW-NEXT: vmovaps 256(%rdi), %ymm9
6994 ; AVX2-SLOW-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6995 ; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %ymm0
6996 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6997 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %ymm1
6998 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6999 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm8 = <0,6,4,u>
7000 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7001 ; AVX2-SLOW-NEXT: vpermps %ymm14, %ymm8, %ymm0
7002 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm9[0,1],ymm7[0,1]
7003 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
7004 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7005 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,2,2,4,6,6,6]
7006 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7007 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm5[4,5,6,7]
7008 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7009 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm5 = [4,2,4,2,4,2,4,2]
7010 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm5, %ymm1
7011 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7012 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7013 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7014 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[0,1],ymm2[0,1]
7015 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm2[6,7]
7016 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7017 ; AVX2-SLOW-NEXT: vmovaps 576(%rdi), %ymm0
7018 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7019 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
7020 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7021 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm8, %ymm0
7022 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
7023 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7024 ; AVX2-SLOW-NEXT: vmovaps 704(%rdi), %ymm1
7025 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7026 ; AVX2-SLOW-NEXT: vmovaps 736(%rdi), %ymm2
7027 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7028 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7029 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7030 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm5, %ymm1
7031 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7032 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7033 ; AVX2-SLOW-NEXT: vmovaps 1056(%rdi), %ymm1
7034 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7035 ; AVX2-SLOW-NEXT: vmovaps 1024(%rdi), %ymm0
7036 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7037 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
7038 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7039 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7040 ; AVX2-SLOW-NEXT: vmovaps 992(%rdi), %ymm0
7041 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7042 ; AVX2-SLOW-NEXT: vmovaps 960(%rdi), %ymm1
7043 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7044 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7045 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7046 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm8, %ymm0
7047 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
7048 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7049 ; AVX2-SLOW-NEXT: vmovaps 1088(%rdi), %ymm1
7050 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7051 ; AVX2-SLOW-NEXT: vmovaps 1120(%rdi), %ymm2
7052 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7053 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7054 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7055 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm5, %ymm1
7056 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7057 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7058 ; AVX2-SLOW-NEXT: vmovaps 1440(%rdi), %ymm1
7059 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7060 ; AVX2-SLOW-NEXT: vmovaps 1408(%rdi), %ymm0
7061 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7062 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
7063 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7064 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7065 ; AVX2-SLOW-NEXT: vmovaps 1376(%rdi), %ymm0
7066 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7067 ; AVX2-SLOW-NEXT: vmovaps 1344(%rdi), %ymm1
7068 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7069 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7070 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7071 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm8, %ymm0
7072 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
7073 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7074 ; AVX2-SLOW-NEXT: vmovaps 1472(%rdi), %ymm1
7075 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7076 ; AVX2-SLOW-NEXT: vmovaps 1504(%rdi), %ymm2
7077 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7078 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7079 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7080 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm5, %ymm1
7081 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7082 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7083 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm1
7084 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7085 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm0
7086 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7087 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
7088 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7089 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
7090 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7091 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm1
7092 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7093 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7094 ; AVX2-SLOW-NEXT: vpermps %ymm12, %ymm8, %ymm0
7095 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm15[0,2,2,2,4,6,6,6]
7096 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7097 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm1
7098 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7099 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm2
7100 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7101 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7102 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm5, %ymm1
7103 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7104 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7105 ; AVX2-SLOW-NEXT: vmovaps 480(%rdi), %ymm1
7106 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7107 ; AVX2-SLOW-NEXT: vmovaps 448(%rdi), %ymm0
7108 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7109 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
7110 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7111 ; AVX2-SLOW-NEXT: vmovaps 416(%rdi), %ymm0
7112 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7113 ; AVX2-SLOW-NEXT: vmovaps 384(%rdi), %ymm1
7114 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7115 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7116 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm8, %ymm0
7117 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,2,2,2,4,6,6,6]
7118 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7119 ; AVX2-SLOW-NEXT: vmovaps 512(%rdi), %ymm1
7120 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7121 ; AVX2-SLOW-NEXT: vmovaps 544(%rdi), %ymm2
7122 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7123 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7124 ; AVX2-SLOW-NEXT: vpermps %ymm7, %ymm5, %ymm1
7125 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7126 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7127 ; AVX2-SLOW-NEXT: vmovaps 864(%rdi), %ymm1
7128 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7129 ; AVX2-SLOW-NEXT: vmovaps 832(%rdi), %ymm0
7130 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7131 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
7132 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7133 ; AVX2-SLOW-NEXT: vmovaps 800(%rdi), %ymm0
7134 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7135 ; AVX2-SLOW-NEXT: vmovaps 768(%rdi), %ymm1
7136 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7137 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7138 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm8, %ymm0
7139 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,2,2,2,4,6,6,6]
7140 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7141 ; AVX2-SLOW-NEXT: vmovaps 896(%rdi), %ymm1
7142 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7143 ; AVX2-SLOW-NEXT: vmovaps 928(%rdi), %ymm2
7144 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7145 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7146 ; AVX2-SLOW-NEXT: vpermps %ymm3, %ymm5, %ymm1
7147 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7148 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7149 ; AVX2-SLOW-NEXT: vmovaps 1184(%rdi), %ymm0
7150 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7151 ; AVX2-SLOW-NEXT: vmovaps 1152(%rdi), %ymm1
7152 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7153 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7154 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm8, %ymm0
7155 ; AVX2-SLOW-NEXT: vmovaps 1248(%rdi), %ymm8
7156 ; AVX2-SLOW-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7157 ; AVX2-SLOW-NEXT: vmovaps 1216(%rdi), %ymm1
7158 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7159 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm8[0,1]
7160 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3,4,5],ymm8[6,7]
7161 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm8[0,2,2,2,4,6,6,6]
7162 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7163 ; AVX2-SLOW-NEXT: vmovaps 1280(%rdi), %ymm1
7164 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7165 ; AVX2-SLOW-NEXT: vmovaps 1312(%rdi), %ymm13
7166 ; AVX2-SLOW-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7167 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5,6,7]
7168 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm5, %ymm5
7169 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
7170 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7171 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm5 = <1,7,5,u>
7172 ; AVX2-SLOW-NEXT: vpermps %ymm14, %ymm5, %ymm0
7173 ; AVX2-SLOW-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
7174 ; AVX2-SLOW-NEXT: # ymm13 = mem[1,3,2,3,5,7,6,7]
7175 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7]
7176 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm0 = [5,3,5,3,5,3,5,3]
7177 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
7178 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
7179 ; AVX2-SLOW-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7180 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
7181 ; AVX2-SLOW-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
7182 ; AVX2-SLOW-NEXT: # ymm14 = mem[1,3,2,3,5,7,6,7]
7183 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
7184 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
7185 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
7186 ; AVX2-SLOW-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7187 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
7188 ; AVX2-SLOW-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
7189 ; AVX2-SLOW-NEXT: # ymm14 = mem[1,3,2,3,5,7,6,7]
7190 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
7191 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
7192 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
7193 ; AVX2-SLOW-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7194 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
7195 ; AVX2-SLOW-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
7196 ; AVX2-SLOW-NEXT: # ymm14 = mem[1,3,2,3,5,7,6,7]
7197 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
7198 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
7199 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
7200 ; AVX2-SLOW-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7201 ; AVX2-SLOW-NEXT: vpermps %ymm12, %ymm5, %ymm12
7202 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm13 = ymm15[1,3,2,3,5,7,6,7]
7203 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm13[3,4,5,6,7]
7204 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm0, %ymm11
7205 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3,4,5],ymm11[6,7]
7206 ; AVX2-SLOW-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7207 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm5, %ymm9
7208 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,3,2,3,5,7,6,7]
7209 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4,5,6,7]
7210 ; AVX2-SLOW-NEXT: vpermps %ymm7, %ymm0, %ymm7
7211 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3,4,5],ymm7[6,7]
7212 ; AVX2-SLOW-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7213 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm5, %ymm4
7214 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,3,2,3,5,7,6,7]
7215 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5,6,7]
7216 ; AVX2-SLOW-NEXT: vpermps %ymm3, %ymm0, %ymm3
7217 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
7218 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7219 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm5, %ymm2
7220 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm8[1,3,2,3,5,7,6,7]
7221 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
7222 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm0, %ymm0
7223 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
7224 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7225 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %xmm4
7226 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm4[2,3,2,3]
7227 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
7228 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,2,3,4,4,6,7]
7229 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
7230 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7231 ; AVX2-SLOW-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7232 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
7233 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
7234 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
7235 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
7236 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7237 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7238 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
7239 ; AVX2-SLOW-NEXT: vpermilps $224, (%rsp), %ymm2 # 32-byte Folded Reload
7240 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,0,2,3,4,4,6,7]
7241 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
7242 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
7243 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
7244 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7245 ; AVX2-SLOW-NEXT: vmovaps 576(%rdi), %xmm0
7246 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7247 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
7248 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
7249 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,2,3,4,4,6,7]
7250 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
7251 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7252 ; AVX2-SLOW-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7253 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
7254 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
7255 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
7256 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
7257 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7258 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
7259 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
7260 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
7261 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,0,2,3,4,4,6,7]
7262 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
7263 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
7264 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
7265 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7266 ; AVX2-SLOW-NEXT: vmovaps 960(%rdi), %xmm2
7267 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm2[2,3,2,3]
7268 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7269 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0,2,3,4,4,6,7]
7270 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
7271 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7272 ; AVX2-SLOW-NEXT: vblendps $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7273 ; AVX2-SLOW-NEXT: # ymm1 = ymm1[0,1],mem[2,3],ymm1[4,5],mem[6,7]
7274 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
7275 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
7276 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
7277 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7278 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
7279 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
7280 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
7281 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
7282 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3],ymm5[4,5,6],ymm1[7]
7283 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
7284 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
7285 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7286 ; AVX2-SLOW-NEXT: vmovaps 1344(%rdi), %xmm1
7287 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm1[2,3,2,3]
7288 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
7289 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
7290 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3],ymm0[4],ymm5[5,6,7]
7291 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
7292 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
7293 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm13[2,3],ymm5[4,5],ymm13[6,7]
7294 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
7295 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,0,3]
7296 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
7297 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6,7]
7298 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
7299 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm14[0,0,0,0,4,4,4,4]
7300 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
7301 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm15[0,0,2,3,4,4,6,7]
7302 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5,6],ymm5[7]
7303 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
7304 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
7305 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7306 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm0
7307 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7308 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
7309 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
7310 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
7311 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3],ymm0[4],ymm5[5,6,7]
7312 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
7313 ; AVX2-SLOW-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
7314 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,1],ymm5[2,3],mem[4,5],ymm5[6,7]
7315 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
7316 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,0,3]
7317 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
7318 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6,7]
7319 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
7320 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,0,0,0,4,4,4,4]
7321 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
7322 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,0,2,3,4,4,6,7]
7323 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5,6],ymm5[7]
7324 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
7325 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
7326 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7327 ; AVX2-SLOW-NEXT: vmovaps 384(%rdi), %xmm0
7328 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7329 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
7330 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
7331 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,0,2,3,4,4,6,7]
7332 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm6[1,2,3],ymm0[4],ymm6[5,6,7]
7333 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
7334 ; AVX2-SLOW-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm6 # 32-byte Folded Reload
7335 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,1],ymm5[2,3],mem[4,5],ymm5[6,7]
7336 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7]
7337 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,2,0,3]
7338 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
7339 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3,4,5,6,7]
7340 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
7341 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,0,0,0,4,4,4,4]
7342 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
7343 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,0,2,3,4,4,6,7]
7344 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3],ymm7[4,5,6],ymm6[7]
7345 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
7346 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5,6,7]
7347 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7348 ; AVX2-SLOW-NEXT: vmovaps 768(%rdi), %xmm0
7349 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7350 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm0[2,3,2,3]
7351 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
7352 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,0,2,3,4,4,6,7]
7353 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3],ymm6[4],ymm7[5,6,7]
7354 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7355 ; AVX2-SLOW-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
7356 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
7357 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,0,2,3,6,4,6,7]
7358 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,0,3]
7359 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,3,2,3]
7360 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4,5,6,7]
7361 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
7362 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,0,0,0,4,4,4,4]
7363 ; AVX2-SLOW-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
7364 ; AVX2-SLOW-NEXT: # ymm8 = mem[0,0,2,3,4,4,6,7]
7365 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3],ymm8[4,5,6],ymm7[7]
7366 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
7367 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm7[5,6,7]
7368 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7369 ; AVX2-SLOW-NEXT: vmovaps 1152(%rdi), %xmm12
7370 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm12[2,3,2,3]
7371 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
7372 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm9[0,0,2,3,4,4,6,7]
7373 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3],ymm7[4],ymm8[5,6,7]
7374 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7375 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
7376 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm0[2,3],ymm11[4,5],ymm0[6,7]
7377 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0,2,3,6,4,6,7]
7378 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,2,0,3]
7379 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,3,2,3]
7380 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5,6,7]
7381 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
7382 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm6[0,0,0,0,4,4,4,4]
7383 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
7384 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm5[0,0,2,3,4,4,6,7]
7385 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3],ymm10[4,5,6],ymm8[7]
7386 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,3]
7387 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5,6,7]
7388 ; AVX2-SLOW-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7389 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,3,3,3]
7390 ; AVX2-SLOW-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
7391 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,1,3,3,4,5,7,7]
7392 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm7[1,2,3],ymm4[4],ymm7[5,6,7]
7393 ; AVX2-SLOW-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
7394 ; AVX2-SLOW-NEXT: # ymm7 = mem[3,3,3,3,7,7,7,7]
7395 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
7396 ; AVX2-SLOW-NEXT: # ymm7 = ymm7[0],mem[1],ymm7[2,3,4],mem[5],ymm7[6,7]
7397 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,0,3]
7398 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,3,2,3]
7399 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm7[3,4,5,6,7]
7400 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm3[0,1,0,1,4,5,4,5]
7401 ; AVX2-SLOW-NEXT: vpermilps $244, (%rsp), %ymm8 # 32-byte Folded Reload
7402 ; AVX2-SLOW-NEXT: # ymm8 = mem[0,1,3,3,4,5,7,7]
7403 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3],ymm8[4,5,6],ymm7[7]
7404 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
7405 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm7[5,6,7]
7406 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7407 ; AVX2-SLOW-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
7408 ; AVX2-SLOW-NEXT: # xmm3 = mem[3,3,3,3]
7409 ; AVX2-SLOW-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
7410 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,3,3,4,5,7,7]
7411 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3],ymm3[4],ymm4[5,6,7]
7412 ; AVX2-SLOW-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
7413 ; AVX2-SLOW-NEXT: # ymm4 = mem[3,3,3,3,7,7,7,7]
7414 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
7415 ; AVX2-SLOW-NEXT: # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
7416 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,0,3]
7417 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,3,2,3]
7418 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5,6,7]
7419 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
7420 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,0,1,4,5,4,5]
7421 ; AVX2-SLOW-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
7422 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,1,3,3,4,5,7,7]
7423 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2],ymm4[3],ymm7[4,5,6],ymm4[7]
7424 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
7425 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6,7]
7426 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7427 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
7428 ; AVX2-SLOW-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
7429 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,1,3,3,4,5,7,7]
7430 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3],ymm2[4],ymm3[5,6,7]
7431 ; AVX2-SLOW-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
7432 ; AVX2-SLOW-NEXT: # ymm3 = mem[3,3,3,3,7,7,7,7]
7433 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
7434 ; AVX2-SLOW-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
7435 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,2,0,3]
7436 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,3,2,3]
7437 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
7438 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
7439 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,1,0,1,4,5,4,5]
7440 ; AVX2-SLOW-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
7441 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,3,3,4,5,7,7]
7442 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3],ymm4[4,5,6],ymm3[7]
7443 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
7444 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
7445 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7446 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
7447 ; AVX2-SLOW-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
7448 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,3,3,4,5,7,7]
7449 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
7450 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm13[3,3,3,3,7,7,7,7]
7451 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
7452 ; AVX2-SLOW-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4],mem[5],ymm2[6,7]
7453 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
7454 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
7455 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
7456 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm14[0,1,0,1,4,5,4,5]
7457 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm15[0,1,3,3,4,5,7,7]
7458 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
7459 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
7460 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
7461 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7462 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm12[3,3,3,3]
7463 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm9[0,1,3,3,4,5,7,7]
7464 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
7465 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,3,3,3,7,7,7,7]
7466 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm11[1],ymm2[2,3,4],ymm11[5],ymm2[6,7]
7467 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
7468 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
7469 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
7470 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,1,0,1,4,5,4,5]
7471 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm5[0,1,3,3,4,5,7,7]
7472 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
7473 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
7474 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm2[5,6,7]
7475 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7476 ; AVX2-SLOW-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7477 ; AVX2-SLOW-NEXT: # xmm0 = mem[3,3,3,3]
7478 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
7479 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,1,3,3,4,5,7,7]
7480 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
7481 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
7482 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm13[3,3,3,3,7,7,7,7]
7483 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
7484 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4],ymm11[5],ymm1[6,7]
7485 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
7486 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
7487 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7488 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
7489 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
7490 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
7491 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,1,3,3,4,5,7,7]
7492 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
7493 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
7494 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
7495 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7496 ; AVX2-SLOW-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7497 ; AVX2-SLOW-NEXT: # xmm0 = mem[3,3,3,3]
7498 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7499 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,1,3,3,4,5,7,7]
7500 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
7501 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
7502 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
7503 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
7504 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
7505 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
7506 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
7507 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7508 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
7509 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm9[0,1,0,1,4,5,4,5]
7510 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
7511 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm8[0,1,3,3,4,5,7,7]
7512 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
7513 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
7514 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
7515 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7516 ; AVX2-SLOW-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7517 ; AVX2-SLOW-NEXT: # xmm0 = mem[3,3,3,3]
7518 ; AVX2-SLOW-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
7519 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1,3,3,4,5,7,7]
7520 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
7521 ; AVX2-SLOW-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
7522 ; AVX2-SLOW-NEXT: # ymm1 = mem[3,3,3,3,7,7,7,7]
7523 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
7524 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
7525 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
7526 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
7527 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7528 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
7529 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1,0,1,4,5,4,5]
7530 ; AVX2-SLOW-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
7531 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,3,3,4,5,7,7]
7532 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
7533 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
7534 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
7535 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7536 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7537 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
7538 ; AVX2-SLOW-NEXT: # ymm2 = ymm0[0,1,2,3],mem[4,5,6,7]
7539 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7540 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7541 ; AVX2-SLOW-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
7542 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1,2,3],ymm0[4,5,6,7]
7543 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7544 ; AVX2-SLOW-NEXT: vmovaps 272(%rdi), %xmm0
7545 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7546 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
7547 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
7548 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = [4,2,4,2]
7549 ; AVX2-SLOW-NEXT: # xmm1 = mem[0,0]
7550 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm1, %ymm2
7551 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm0[2,3,4,5,6,7]
7552 ; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
7553 ; AVX2-SLOW-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7554 ; AVX2-SLOW-NEXT: # ymm0 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
7555 ; AVX2-SLOW-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
7556 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [0,2,0,6,0,2,0,6]
7557 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,1,0,1]
7558 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm5, %ymm3
7559 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
7560 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7561 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm3 # 32-byte Folded Reload
7562 ; AVX2-SLOW-NEXT: # ymm3 = ymm4[0,1,2,3],mem[4,5,6,7]
7563 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7564 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm7[4,5,6,7]
7565 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7566 ; AVX2-SLOW-NEXT: vmovaps 464(%rdi), %xmm0
7567 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7568 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
7569 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
7570 ; AVX2-SLOW-NEXT: vpermps %ymm3, %ymm1, %ymm3
7571 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
7572 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
7573 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7574 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm5, %ymm3
7575 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
7576 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7577 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7578 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
7579 ; AVX2-SLOW-NEXT: # ymm0 = ymm2[0,1,2,3],mem[4,5,6,7]
7580 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7581 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7582 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
7583 ; AVX2-SLOW-NEXT: # ymm3 = ymm2[0,1,2,3],mem[4,5,6,7]
7584 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7585 ; AVX2-SLOW-NEXT: vmovaps 656(%rdi), %xmm2
7586 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7587 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
7588 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
7589 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm3
7590 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
7591 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7592 ; AVX2-SLOW-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm0 # 32-byte Folded Reload
7593 ; AVX2-SLOW-NEXT: # ymm0 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
7594 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7595 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm5, %ymm3
7596 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
7597 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7598 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
7599 ; AVX2-SLOW-NEXT: # ymm0 = ymm10[0,1,2,3],mem[4,5,6,7]
7600 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7601 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm13[4,5,6,7]
7602 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7603 ; AVX2-SLOW-NEXT: vmovaps 848(%rdi), %xmm2
7604 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7605 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
7606 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
7607 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm3
7608 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
7609 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5,6,7]
7610 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7611 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm5, %ymm3
7612 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
7613 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7614 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7615 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
7616 ; AVX2-SLOW-NEXT: # ymm0 = ymm2[0,1,2,3],mem[4,5,6,7]
7617 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7618 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7619 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
7620 ; AVX2-SLOW-NEXT: # ymm3 = ymm2[0,1,2,3],mem[4,5,6,7]
7621 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7622 ; AVX2-SLOW-NEXT: vmovaps 1040(%rdi), %xmm2
7623 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7624 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
7625 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
7626 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm3
7627 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
7628 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7629 ; AVX2-SLOW-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm13 # 32-byte Folded Reload
7630 ; AVX2-SLOW-NEXT: # ymm13 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
7631 ; AVX2-SLOW-NEXT: vpermps %ymm13, %ymm5, %ymm3
7632 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
7633 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7634 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7635 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
7636 ; AVX2-SLOW-NEXT: # ymm0 = ymm2[0,1,2,3],mem[4,5,6,7]
7637 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7638 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7639 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
7640 ; AVX2-SLOW-NEXT: # ymm12 = ymm2[0,1,2,3],mem[4,5,6,7]
7641 ; AVX2-SLOW-NEXT: vmovaps 1232(%rdi), %xmm11
7642 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm12[0,1],ymm11[2,3],ymm12[4,5,6,7]
7643 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
7644 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm3
7645 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
7646 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7647 ; AVX2-SLOW-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm9 # 32-byte Folded Reload
7648 ; AVX2-SLOW-NEXT: # ymm9 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
7649 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm5, %ymm3
7650 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
7651 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7652 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7653 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
7654 ; AVX2-SLOW-NEXT: # ymm0 = ymm2[0,1,2,3],mem[4,5,6,7]
7655 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7656 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7657 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload
7658 ; AVX2-SLOW-NEXT: # ymm7 = ymm2[0,1,2,3],mem[4,5,6,7]
7659 ; AVX2-SLOW-NEXT: vmovaps 1424(%rdi), %xmm8
7660 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
7661 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
7662 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm3
7663 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
7664 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7665 ; AVX2-SLOW-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm6 # 32-byte Folded Reload
7666 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
7667 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm5, %ymm3
7668 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
7669 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7670 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm10 # 32-byte Folded Reload
7671 ; AVX2-SLOW-NEXT: # ymm10 = ymm15[0,1,2,3],mem[4,5,6,7]
7672 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7673 ; AVX2-SLOW-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
7674 ; AVX2-SLOW-NEXT: # ymm4 = ymm2[0,1,2,3],mem[4,5,6,7]
7675 ; AVX2-SLOW-NEXT: vmovaps 80(%rdi), %xmm3
7676 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm3[2,3],ymm10[4,5,6,7]
7677 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
7678 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm1, %ymm1
7679 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
7680 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7681 ; AVX2-SLOW-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
7682 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1],ymm2[2,3],mem[4,5,6,7]
7683 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm5, %ymm0
7684 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
7685 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7686 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
7687 ; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
7688 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7689 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
7690 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = [5,3,5,3]
7691 ; AVX2-SLOW-NEXT: # xmm1 = mem[0,0]
7692 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
7693 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm0[2,3,4,5,6,7]
7694 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,3,1,7,0,3,1,7]
7695 ; AVX2-SLOW-NEXT: # ymm0 = mem[0,1,0,1]
7696 ; AVX2-SLOW-NEXT: vpermps (%rsp), %ymm0, %ymm15 # 32-byte Folded Reload
7697 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
7698 ; AVX2-SLOW-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7699 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
7700 ; AVX2-SLOW-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
7701 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
7702 ; AVX2-SLOW-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
7703 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
7704 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
7705 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
7706 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
7707 ; AVX2-SLOW-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7708 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
7709 ; AVX2-SLOW-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
7710 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
7711 ; AVX2-SLOW-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
7712 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
7713 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
7714 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
7715 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
7716 ; AVX2-SLOW-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7717 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
7718 ; AVX2-SLOW-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
7719 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
7720 ; AVX2-SLOW-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
7721 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
7722 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
7723 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
7724 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
7725 ; AVX2-SLOW-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7726 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
7727 ; AVX2-SLOW-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
7728 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
7729 ; AVX2-SLOW-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
7730 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
7731 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
7732 ; AVX2-SLOW-NEXT: vpermps %ymm13, %ymm0, %ymm13
7733 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
7734 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm12 = ymm12[1,1,1,1,5,5,5,5]
7735 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3],ymm12[4,5,6,7]
7736 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
7737 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3,4,5,6,7]
7738 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm0, %ymm9
7739 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1,2,3,4],ymm9[5,6,7]
7740 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,1,1,1,5,5,5,5]
7741 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6,7]
7742 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
7743 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
7744 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm0, %ymm6
7745 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
7746 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm10[1,1,1,1,5,5,5,5]
7747 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3],ymm5[4,5,6,7]
7748 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm1, %ymm1
7749 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
7750 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm0, %ymm0
7751 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
7752 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7753 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 192(%rsi)
7754 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7755 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 128(%rsi)
7756 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7757 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%rsi)
7758 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7759 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rsi)
7760 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7761 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 224(%rsi)
7762 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7763 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 160(%rsi)
7764 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7765 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 96(%rsi)
7766 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7767 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%rsi)
7768 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7769 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 192(%rdx)
7770 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7771 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 128(%rdx)
7772 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7773 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%rdx)
7774 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7775 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rdx)
7776 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7777 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 224(%rdx)
7778 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7779 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 160(%rdx)
7780 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7781 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 96(%rdx)
7782 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7783 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%rdx)
7784 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7785 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 192(%rcx)
7786 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7787 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 128(%rcx)
7788 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7789 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%rcx)
7790 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7791 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rcx)
7792 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7793 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 224(%rcx)
7794 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7795 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 160(%rcx)
7796 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7797 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 96(%rcx)
7798 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7799 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%rcx)
7800 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7801 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%r8)
7802 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7803 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%r8)
7804 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7805 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 128(%r8)
7806 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7807 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 192(%r8)
7808 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7809 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 224(%r8)
7810 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7811 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 160(%r8)
7812 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7813 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 96(%r8)
7814 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7815 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%r8)
7816 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7817 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 224(%r9)
7818 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7819 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 192(%r9)
7820 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7821 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 160(%r9)
7822 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7823 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 128(%r9)
7824 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7825 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 96(%r9)
7826 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7827 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%r9)
7828 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7829 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%r9)
7830 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7831 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%r9)
7832 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
7833 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 224(%rax)
7834 ; AVX2-SLOW-NEXT: vmovaps %ymm9, 192(%rax)
7835 ; AVX2-SLOW-NEXT: vmovaps %ymm13, 160(%rax)
7836 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7837 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 128(%rax)
7838 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7839 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 96(%rax)
7840 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7841 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%rax)
7842 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7843 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%rax)
7844 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
7845 ; AVX2-SLOW-NEXT: addq $2504, %rsp # imm = 0x9C8
7846 ; AVX2-SLOW-NEXT: vzeroupper
7847 ; AVX2-SLOW-NEXT: retq
7848 ;
7849 ; AVX2-FAST-LABEL: load_i32_stride6_vf64:
7850 ; AVX2-FAST: # %bb.0:
7851 ; AVX2-FAST-NEXT: subq $2504, %rsp # imm = 0x9C8
7852 ; AVX2-FAST-NEXT: vmovaps 672(%rdi), %ymm2
7853 ; AVX2-FAST-NEXT: vmovaps 640(%rdi), %ymm3
7854 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7855 ; AVX2-FAST-NEXT: vmovaps 608(%rdi), %ymm4
7856 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7857 ; AVX2-FAST-NEXT: vmovaps 320(%rdi), %ymm5
7858 ; AVX2-FAST-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7859 ; AVX2-FAST-NEXT: vmovaps 352(%rdi), %ymm6
7860 ; AVX2-FAST-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7861 ; AVX2-FAST-NEXT: vmovaps 288(%rdi), %ymm7
7862 ; AVX2-FAST-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7863 ; AVX2-FAST-NEXT: vmovaps 256(%rdi), %ymm9
7864 ; AVX2-FAST-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7865 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %ymm0
7866 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7867 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %ymm1
7868 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7869 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm8 = <0,6,4,u>
7870 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7871 ; AVX2-FAST-NEXT: vpermps %ymm15, %ymm8, %ymm0
7872 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm9[0,1],ymm7[0,1]
7873 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
7874 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7875 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,2,2,4,6,6,6]
7876 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7877 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm5[4,5,6,7]
7878 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7879 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm5 = [4,2,4,2,4,2,4,2]
7880 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm5, %ymm1
7881 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7882 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7883 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7884 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[0,1],ymm2[0,1]
7885 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm2[6,7]
7886 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7887 ; AVX2-FAST-NEXT: vmovaps 576(%rdi), %ymm0
7888 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7889 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
7890 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7891 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm8, %ymm0
7892 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
7893 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7894 ; AVX2-FAST-NEXT: vmovaps 704(%rdi), %ymm1
7895 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7896 ; AVX2-FAST-NEXT: vmovaps 736(%rdi), %ymm2
7897 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7898 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7899 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7900 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm5, %ymm1
7901 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7902 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7903 ; AVX2-FAST-NEXT: vmovaps 1056(%rdi), %ymm1
7904 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7905 ; AVX2-FAST-NEXT: vmovaps 1024(%rdi), %ymm0
7906 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7907 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
7908 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7909 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7910 ; AVX2-FAST-NEXT: vmovaps 992(%rdi), %ymm0
7911 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7912 ; AVX2-FAST-NEXT: vmovaps 960(%rdi), %ymm1
7913 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7914 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7915 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7916 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm8, %ymm0
7917 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
7918 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7919 ; AVX2-FAST-NEXT: vmovaps 1088(%rdi), %ymm1
7920 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7921 ; AVX2-FAST-NEXT: vmovaps 1120(%rdi), %ymm2
7922 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7923 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7924 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7925 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm5, %ymm1
7926 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7927 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7928 ; AVX2-FAST-NEXT: vmovaps 1440(%rdi), %ymm1
7929 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7930 ; AVX2-FAST-NEXT: vmovaps 1408(%rdi), %ymm0
7931 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7932 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
7933 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7934 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7935 ; AVX2-FAST-NEXT: vmovaps 1376(%rdi), %ymm0
7936 ; AVX2-FAST-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
7937 ; AVX2-FAST-NEXT: vmovaps 1344(%rdi), %ymm1
7938 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7939 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7940 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7941 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm8, %ymm0
7942 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
7943 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7944 ; AVX2-FAST-NEXT: vmovaps 1472(%rdi), %ymm1
7945 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7946 ; AVX2-FAST-NEXT: vmovaps 1504(%rdi), %ymm2
7947 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7948 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7949 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7950 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm5, %ymm1
7951 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7952 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7953 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm1
7954 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7955 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm0
7956 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7957 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
7958 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7959 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
7960 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7961 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm1
7962 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7963 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7964 ; AVX2-FAST-NEXT: vpermps %ymm12, %ymm8, %ymm0
7965 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm14[0,2,2,2,4,6,6,6]
7966 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7967 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm1
7968 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7969 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm2
7970 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7971 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7972 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm5, %ymm1
7973 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7974 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7975 ; AVX2-FAST-NEXT: vmovaps 480(%rdi), %ymm1
7976 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7977 ; AVX2-FAST-NEXT: vmovaps 448(%rdi), %ymm0
7978 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7979 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
7980 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7981 ; AVX2-FAST-NEXT: vmovaps 416(%rdi), %ymm0
7982 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7983 ; AVX2-FAST-NEXT: vmovaps 384(%rdi), %ymm1
7984 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7985 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
7986 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm8, %ymm0
7987 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,2,2,2,4,6,6,6]
7988 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7989 ; AVX2-FAST-NEXT: vmovaps 512(%rdi), %ymm1
7990 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7991 ; AVX2-FAST-NEXT: vmovaps 544(%rdi), %ymm2
7992 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7993 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7994 ; AVX2-FAST-NEXT: vpermps %ymm7, %ymm5, %ymm1
7995 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7996 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7997 ; AVX2-FAST-NEXT: vmovaps 864(%rdi), %ymm1
7998 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7999 ; AVX2-FAST-NEXT: vmovaps 832(%rdi), %ymm0
8000 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8001 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
8002 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8003 ; AVX2-FAST-NEXT: vmovaps 800(%rdi), %ymm0
8004 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8005 ; AVX2-FAST-NEXT: vmovaps 768(%rdi), %ymm1
8006 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8007 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
8008 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm8, %ymm0
8009 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,2,2,2,4,6,6,6]
8010 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8011 ; AVX2-FAST-NEXT: vmovaps 896(%rdi), %ymm1
8012 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8013 ; AVX2-FAST-NEXT: vmovaps 928(%rdi), %ymm2
8014 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8015 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm1[4,5,6,7]
8016 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm5, %ymm1
8017 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8018 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8019 ; AVX2-FAST-NEXT: vmovaps 1184(%rdi), %ymm0
8020 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8021 ; AVX2-FAST-NEXT: vmovaps 1152(%rdi), %ymm1
8022 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8023 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
8024 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm8, %ymm0
8025 ; AVX2-FAST-NEXT: vmovaps 1248(%rdi), %ymm8
8026 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8027 ; AVX2-FAST-NEXT: vmovaps 1216(%rdi), %ymm1
8028 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8029 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm8[0,1]
8030 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3,4,5],ymm8[6,7]
8031 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm8[0,2,2,2,4,6,6,6]
8032 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8033 ; AVX2-FAST-NEXT: vmovaps 1280(%rdi), %ymm1
8034 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8035 ; AVX2-FAST-NEXT: vmovaps 1312(%rdi), %ymm13
8036 ; AVX2-FAST-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8037 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5,6,7]
8038 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm5, %ymm5
8039 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
8040 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8041 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm5 = <1,7,5,u>
8042 ; AVX2-FAST-NEXT: vpermps %ymm15, %ymm5, %ymm0
8043 ; AVX2-FAST-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
8044 ; AVX2-FAST-NEXT: # ymm15 = mem[1,3,2,3,5,7,6,7]
8045 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0,1,2],ymm15[3,4,5,6,7]
8046 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm0 = [5,3,5,3,5,3,5,3]
8047 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
8048 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5],ymm13[6,7]
8049 ; AVX2-FAST-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8050 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
8051 ; AVX2-FAST-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
8052 ; AVX2-FAST-NEXT: # ymm15 = mem[1,3,2,3,5,7,6,7]
8053 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm15[3,4,5,6,7]
8054 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
8055 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7]
8056 ; AVX2-FAST-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8057 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
8058 ; AVX2-FAST-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
8059 ; AVX2-FAST-NEXT: # ymm15 = mem[1,3,2,3,5,7,6,7]
8060 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm15[3,4,5,6,7]
8061 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
8062 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7]
8063 ; AVX2-FAST-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8064 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
8065 ; AVX2-FAST-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
8066 ; AVX2-FAST-NEXT: # ymm15 = mem[1,3,2,3,5,7,6,7]
8067 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm15[3,4,5,6,7]
8068 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
8069 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7]
8070 ; AVX2-FAST-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8071 ; AVX2-FAST-NEXT: vpermps %ymm12, %ymm5, %ymm12
8072 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm13 = ymm14[1,3,2,3,5,7,6,7]
8073 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm13[3,4,5,6,7]
8074 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm0, %ymm11
8075 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3,4,5],ymm11[6,7]
8076 ; AVX2-FAST-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8077 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm5, %ymm9
8078 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,3,2,3,5,7,6,7]
8079 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4,5,6,7]
8080 ; AVX2-FAST-NEXT: vpermps %ymm7, %ymm0, %ymm7
8081 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3,4,5],ymm7[6,7]
8082 ; AVX2-FAST-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8083 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm5, %ymm4
8084 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,3,2,3,5,7,6,7]
8085 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5,6,7]
8086 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm0, %ymm3
8087 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
8088 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8089 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm5, %ymm2
8090 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm8[1,3,2,3,5,7,6,7]
8091 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
8092 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm0, %ymm0
8093 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
8094 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8095 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %xmm2
8096 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm2[2,3,2,3]
8097 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
8098 ; AVX2-FAST-NEXT: # ymm1 = mem[0,0,2,3,4,4,6,7]
8099 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
8100 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8101 ; AVX2-FAST-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8102 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
8103 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm6 = [2,0,6,4,2,0,6,7]
8104 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm6, %ymm1
8105 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8106 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8107 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
8108 ; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
8109 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
8110 ; AVX2-FAST-NEXT: # ymm3 = mem[0,0,2,3,4,4,6,7]
8111 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7]
8112 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
8113 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
8114 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8115 ; AVX2-FAST-NEXT: vmovaps 576(%rdi), %xmm1
8116 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm1[2,3,2,3]
8117 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
8118 ; AVX2-FAST-NEXT: # ymm3 = mem[0,0,2,3,4,4,6,7]
8119 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1,2,3],ymm0[4],ymm3[5,6,7]
8120 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
8121 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
8122 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm13[0,1],ymm12[2,3],ymm13[4,5],ymm12[6,7]
8123 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm6, %ymm3
8124 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8125 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
8126 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
8127 ; AVX2-FAST-NEXT: # ymm3 = mem[0,0,0,0,4,4,4,4]
8128 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
8129 ; AVX2-FAST-NEXT: # ymm4 = mem[0,0,2,3,4,4,6,7]
8130 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3],ymm4[4,5,6],ymm3[7]
8131 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
8132 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
8133 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8134 ; AVX2-FAST-NEXT: vmovaps 960(%rdi), %xmm0
8135 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8136 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8137 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
8138 ; AVX2-FAST-NEXT: # ymm4 = mem[0,0,2,3,4,4,6,7]
8139 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5,6,7]
8140 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8141 ; AVX2-FAST-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
8142 ; AVX2-FAST-NEXT: # ymm4 = mem[0,1],ymm3[2,3],mem[4,5],ymm3[6,7]
8143 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm6, %ymm4
8144 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8145 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
8146 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
8147 ; AVX2-FAST-NEXT: # ymm4 = mem[0,0,0,0,4,4,4,4]
8148 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
8149 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm15[0,0,2,3,4,4,6,7]
8150 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5,6],ymm4[7]
8151 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
8152 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
8153 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8154 ; AVX2-FAST-NEXT: vmovaps 1344(%rdi), %xmm0
8155 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8156 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8157 ; AVX2-FAST-NEXT: vpermilps $224, (%rsp), %ymm5 # 32-byte Folded Reload
8158 ; AVX2-FAST-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
8159 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3],ymm0[4],ymm5[5,6,7]
8160 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8161 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
8162 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm4[0,1],ymm14[2,3],ymm4[4,5],ymm14[6,7]
8163 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm6, %ymm5
8164 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8165 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6,7]
8166 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8167 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm4[0,0,0,0,4,4,4,4]
8168 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
8169 ; AVX2-FAST-NEXT: # ymm7 = mem[0,0,2,3,4,4,6,7]
8170 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3],ymm7[4,5,6],ymm5[7]
8171 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
8172 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
8173 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8174 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm0
8175 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8176 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8177 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
8178 ; AVX2-FAST-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
8179 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3],ymm0[4],ymm5[5,6,7]
8180 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8181 ; AVX2-FAST-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
8182 ; AVX2-FAST-NEXT: # ymm5 = mem[0,1],ymm4[2,3],mem[4,5],ymm4[6,7]
8183 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm6, %ymm5
8184 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8185 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6,7]
8186 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
8187 ; AVX2-FAST-NEXT: # ymm5 = mem[0,0,0,0,4,4,4,4]
8188 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
8189 ; AVX2-FAST-NEXT: # ymm7 = mem[0,0,2,3,4,4,6,7]
8190 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3],ymm7[4,5,6],ymm5[7]
8191 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
8192 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
8193 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8194 ; AVX2-FAST-NEXT: vmovaps 384(%rdi), %xmm0
8195 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8196 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8197 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
8198 ; AVX2-FAST-NEXT: # ymm7 = mem[0,0,2,3,4,4,6,7]
8199 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm7[1,2,3],ymm0[4],ymm7[5,6,7]
8200 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8201 ; AVX2-FAST-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm7 # 32-byte Folded Reload
8202 ; AVX2-FAST-NEXT: # ymm7 = mem[0,1],ymm4[2,3],mem[4,5],ymm4[6,7]
8203 ; AVX2-FAST-NEXT: vpermps %ymm7, %ymm6, %ymm7
8204 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8205 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm7[3,4,5,6,7]
8206 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
8207 ; AVX2-FAST-NEXT: # ymm7 = mem[0,0,0,0,4,4,4,4]
8208 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
8209 ; AVX2-FAST-NEXT: # ymm9 = mem[0,0,2,3,4,4,6,7]
8210 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2],ymm7[3],ymm9[4,5,6],ymm7[7]
8211 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
8212 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm7[5,6,7]
8213 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8214 ; AVX2-FAST-NEXT: vmovaps 768(%rdi), %xmm0
8215 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8216 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm7 = xmm0[2,3,2,3]
8217 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
8218 ; AVX2-FAST-NEXT: # ymm9 = mem[0,0,2,3,4,4,6,7]
8219 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm9[1,2,3],ymm7[4],ymm9[5,6,7]
8220 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8221 ; AVX2-FAST-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
8222 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
8223 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm6, %ymm9
8224 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,3,2,3]
8225 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm9[3,4,5,6,7]
8226 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
8227 ; AVX2-FAST-NEXT: # ymm9 = mem[0,0,0,0,4,4,4,4]
8228 ; AVX2-FAST-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
8229 ; AVX2-FAST-NEXT: # ymm10 = mem[0,0,2,3,4,4,6,7]
8230 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3],ymm10[4,5,6],ymm9[7]
8231 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,3]
8232 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3,4],ymm9[5,6,7]
8233 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8234 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
8235 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8236 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
8237 ; AVX2-FAST-NEXT: vpermps %ymm7, %ymm6, %ymm7
8238 ; AVX2-FAST-NEXT: vmovaps 1152(%rdi), %xmm8
8239 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm9 = xmm8[2,3,2,3]
8240 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
8241 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm6[0,0,2,3,4,4,6,7]
8242 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1,2,3],ymm9[4],ymm10[5,6,7]
8243 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,3,2,3]
8244 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2],ymm7[3,4,5,6,7]
8245 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
8246 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm11[0,0,0,0,4,4,4,4]
8247 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8248 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm0[0,0,2,3,4,4,6,7]
8249 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3],ymm10[4,5,6],ymm9[7]
8250 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,3]
8251 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm9[5,6,7]
8252 ; AVX2-FAST-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8253 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
8254 ; AVX2-FAST-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
8255 ; AVX2-FAST-NEXT: # ymm7 = mem[0,1,3,3,4,5,7,7]
8256 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm7[1,2,3],ymm2[4],ymm7[5,6,7]
8257 ; AVX2-FAST-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
8258 ; AVX2-FAST-NEXT: # ymm7 = mem[3,3,3,3,7,7,7,7]
8259 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
8260 ; AVX2-FAST-NEXT: # ymm7 = ymm7[0],mem[1],ymm7[2,3,4],mem[5],ymm7[6,7]
8261 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,0,3]
8262 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,3,2,3]
8263 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm7[3,4,5,6,7]
8264 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
8265 ; AVX2-FAST-NEXT: # ymm7 = mem[0,1,0,1,4,5,4,5]
8266 ; AVX2-FAST-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
8267 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,3,3,4,5,7,7]
8268 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2],ymm7[3],ymm9[4,5,6],ymm7[7]
8269 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
8270 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm7[5,6,7]
8271 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8272 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
8273 ; AVX2-FAST-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
8274 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,3,3,4,5,7,7]
8275 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
8276 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm12[3,3,3,3,7,7,7,7]
8277 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm13[1],ymm2[2,3,4],ymm13[5],ymm2[6,7]
8278 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
8279 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
8280 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8281 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
8282 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
8283 ; AVX2-FAST-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
8284 ; AVX2-FAST-NEXT: # ymm7 = mem[0,1,3,3,4,5,7,7]
8285 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1,2],ymm2[3],ymm7[4,5,6],ymm2[7]
8286 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
8287 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
8288 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8289 ; AVX2-FAST-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8290 ; AVX2-FAST-NEXT: # xmm1 = mem[3,3,3,3]
8291 ; AVX2-FAST-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
8292 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,3,3,4,5,7,7]
8293 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
8294 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm3[3,3,3,3,7,7,7,7]
8295 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
8296 ; AVX2-FAST-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4],mem[5],ymm2[6,7]
8297 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
8298 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
8299 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8300 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
8301 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
8302 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm15[0,1,3,3,4,5,7,7]
8303 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
8304 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
8305 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
8306 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8307 ; AVX2-FAST-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8308 ; AVX2-FAST-NEXT: # xmm1 = mem[3,3,3,3]
8309 ; AVX2-FAST-NEXT: vpermilps $244, (%rsp), %ymm2 # 32-byte Folded Reload
8310 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,3,3,4,5,7,7]
8311 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
8312 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm14[3,3,3,3,7,7,7,7]
8313 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
8314 ; AVX2-FAST-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4],mem[5],ymm2[6,7]
8315 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
8316 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
8317 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8318 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
8319 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
8320 ; AVX2-FAST-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
8321 ; AVX2-FAST-NEXT: # ymm3 = mem[0,1,3,3,4,5,7,7]
8322 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
8323 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
8324 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
8325 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8326 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm8[3,3,3,3]
8327 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,1,3,3,4,5,7,7]
8328 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
8329 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,3,3,3,7,7,7,7]
8330 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3,4],ymm5[5],ymm2[6,7]
8331 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
8332 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
8333 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8334 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm11[0,1,0,1,4,5,4,5]
8335 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm0[0,1,3,3,4,5,7,7]
8336 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
8337 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
8338 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm2[5,6,7]
8339 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8340 ; AVX2-FAST-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
8341 ; AVX2-FAST-NEXT: # xmm0 = mem[3,3,3,3]
8342 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
8343 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,1,3,3,4,5,7,7]
8344 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
8345 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
8346 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm13[3,3,3,3,7,7,7,7]
8347 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
8348 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4],ymm11[5],ymm1[6,7]
8349 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
8350 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8351 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8352 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
8353 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
8354 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
8355 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,1,3,3,4,5,7,7]
8356 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
8357 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
8358 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
8359 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8360 ; AVX2-FAST-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
8361 ; AVX2-FAST-NEXT: # xmm0 = mem[3,3,3,3]
8362 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8363 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,1,3,3,4,5,7,7]
8364 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
8365 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
8366 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
8367 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
8368 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
8369 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
8370 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8371 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8372 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
8373 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm9[0,1,0,1,4,5,4,5]
8374 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
8375 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm8[0,1,3,3,4,5,7,7]
8376 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
8377 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
8378 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
8379 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8380 ; AVX2-FAST-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
8381 ; AVX2-FAST-NEXT: # xmm0 = mem[3,3,3,3]
8382 ; AVX2-FAST-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
8383 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1,3,3,4,5,7,7]
8384 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
8385 ; AVX2-FAST-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
8386 ; AVX2-FAST-NEXT: # ymm1 = mem[3,3,3,3,7,7,7,7]
8387 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
8388 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
8389 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
8390 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8391 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8392 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
8393 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1,0,1,4,5,4,5]
8394 ; AVX2-FAST-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
8395 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,3,3,4,5,7,7]
8396 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
8397 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
8398 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
8399 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8400 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8401 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
8402 ; AVX2-FAST-NEXT: # ymm3 = ymm0[0,1,2,3],mem[4,5,6,7]
8403 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8404 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8405 ; AVX2-FAST-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
8406 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1,2,3],ymm0[4,5,6,7]
8407 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8408 ; AVX2-FAST-NEXT: vmovaps 272(%rdi), %xmm0
8409 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8410 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
8411 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
8412 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm2 = [4,2,4,2]
8413 ; AVX2-FAST-NEXT: # xmm2 = mem[0,0]
8414 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm2, %ymm1
8415 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3,4,5,6,7]
8416 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8417 ; AVX2-FAST-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8418 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
8419 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8420 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [0,2,0,6,0,2,0,6]
8421 ; AVX2-FAST-NEXT: # ymm5 = mem[0,1,0,1]
8422 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm5, %ymm3
8423 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
8424 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8425 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm3 # 32-byte Folded Reload
8426 ; AVX2-FAST-NEXT: # ymm3 = ymm4[0,1,2,3],mem[4,5,6,7]
8427 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8428 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm7[4,5,6,7]
8429 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8430 ; AVX2-FAST-NEXT: vmovaps 464(%rdi), %xmm0
8431 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8432 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
8433 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
8434 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm2, %ymm3
8435 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
8436 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
8437 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8438 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm5, %ymm3
8439 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
8440 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8441 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8442 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
8443 ; AVX2-FAST-NEXT: # ymm0 = ymm1[0,1,2,3],mem[4,5,6,7]
8444 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8445 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8446 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
8447 ; AVX2-FAST-NEXT: # ymm3 = ymm1[0,1,2,3],mem[4,5,6,7]
8448 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8449 ; AVX2-FAST-NEXT: vmovaps 656(%rdi), %xmm1
8450 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8451 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3],ymm3[4,5,6,7]
8452 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
8453 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm3
8454 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
8455 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8456 ; AVX2-FAST-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm0 # 32-byte Folded Reload
8457 ; AVX2-FAST-NEXT: # ymm0 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
8458 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8459 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm5, %ymm3
8460 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
8461 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8462 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
8463 ; AVX2-FAST-NEXT: # ymm0 = ymm10[0,1,2,3],mem[4,5,6,7]
8464 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8465 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm13[4,5,6,7]
8466 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8467 ; AVX2-FAST-NEXT: vmovaps 848(%rdi), %xmm1
8468 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8469 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3],ymm3[4,5,6,7]
8470 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
8471 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm3
8472 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
8473 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5,6,7]
8474 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8475 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm5, %ymm3
8476 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
8477 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8478 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8479 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
8480 ; AVX2-FAST-NEXT: # ymm0 = ymm1[0,1,2,3],mem[4,5,6,7]
8481 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8482 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8483 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
8484 ; AVX2-FAST-NEXT: # ymm3 = ymm1[0,1,2,3],mem[4,5,6,7]
8485 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8486 ; AVX2-FAST-NEXT: vmovaps 1040(%rdi), %xmm1
8487 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8488 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3],ymm3[4,5,6,7]
8489 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
8490 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm3
8491 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
8492 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8493 ; AVX2-FAST-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm13 # 32-byte Folded Reload
8494 ; AVX2-FAST-NEXT: # ymm13 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
8495 ; AVX2-FAST-NEXT: vpermps %ymm13, %ymm5, %ymm3
8496 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
8497 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8498 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8499 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
8500 ; AVX2-FAST-NEXT: # ymm0 = ymm1[0,1,2,3],mem[4,5,6,7]
8501 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8502 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8503 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
8504 ; AVX2-FAST-NEXT: # ymm12 = ymm1[0,1,2,3],mem[4,5,6,7]
8505 ; AVX2-FAST-NEXT: vmovaps 1232(%rdi), %xmm11
8506 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm11[2,3],ymm12[4,5,6,7]
8507 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
8508 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm3
8509 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
8510 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8511 ; AVX2-FAST-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm9 # 32-byte Folded Reload
8512 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
8513 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm5, %ymm3
8514 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
8515 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8516 ; AVX2-FAST-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
8517 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
8518 ; AVX2-FAST-NEXT: # ymm0 = ymm1[0,1,2,3],mem[4,5,6,7]
8519 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8520 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8521 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm7 # 32-byte Folded Reload
8522 ; AVX2-FAST-NEXT: # ymm7 = ymm1[0,1,2,3],mem[4,5,6,7]
8523 ; AVX2-FAST-NEXT: vmovaps 1424(%rdi), %xmm8
8524 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
8525 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
8526 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm3
8527 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
8528 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8529 ; AVX2-FAST-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm6 # 32-byte Folded Reload
8530 ; AVX2-FAST-NEXT: # ymm6 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
8531 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm5, %ymm3
8532 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
8533 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8534 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm10 # 32-byte Folded Reload
8535 ; AVX2-FAST-NEXT: # ymm10 = ymm15[0,1,2,3],mem[4,5,6,7]
8536 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8537 ; AVX2-FAST-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
8538 ; AVX2-FAST-NEXT: # ymm4 = ymm1[0,1,2,3],mem[4,5,6,7]
8539 ; AVX2-FAST-NEXT: vmovaps 80(%rdi), %xmm3
8540 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1],ymm3[2,3],ymm10[4,5,6,7]
8541 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
8542 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm2, %ymm2
8543 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
8544 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8545 ; AVX2-FAST-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
8546 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1],ymm2[2,3],mem[4,5,6,7]
8547 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm5, %ymm0
8548 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
8549 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8550 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
8551 ; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
8552 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8553 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
8554 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm1 = [5,3,5,3]
8555 ; AVX2-FAST-NEXT: # xmm1 = mem[0,0]
8556 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
8557 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm0[2,3,4,5,6,7]
8558 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,3,1,7,0,3,1,7]
8559 ; AVX2-FAST-NEXT: # ymm0 = mem[0,1,0,1]
8560 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
8561 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
8562 ; AVX2-FAST-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8563 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
8564 ; AVX2-FAST-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
8565 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
8566 ; AVX2-FAST-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
8567 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
8568 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
8569 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
8570 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
8571 ; AVX2-FAST-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8572 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
8573 ; AVX2-FAST-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
8574 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
8575 ; AVX2-FAST-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
8576 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
8577 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
8578 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
8579 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
8580 ; AVX2-FAST-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8581 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
8582 ; AVX2-FAST-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
8583 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
8584 ; AVX2-FAST-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
8585 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
8586 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
8587 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
8588 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
8589 ; AVX2-FAST-NEXT: vmovups %ymm5, (%rsp) # 32-byte Spill
8590 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
8591 ; AVX2-FAST-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
8592 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
8593 ; AVX2-FAST-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
8594 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
8595 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
8596 ; AVX2-FAST-NEXT: vpermps %ymm13, %ymm0, %ymm13
8597 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
8598 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm12 = ymm12[1,1,1,1,5,5,5,5]
8599 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3],ymm12[4,5,6,7]
8600 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
8601 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3,4,5,6,7]
8602 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm0, %ymm9
8603 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1,2,3,4],ymm9[5,6,7]
8604 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,1,1,1,5,5,5,5]
8605 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6,7]
8606 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
8607 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
8608 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm0, %ymm6
8609 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
8610 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm10[1,1,1,1,5,5,5,5]
8611 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3],ymm5[4,5,6,7]
8612 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm1, %ymm1
8613 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
8614 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm0, %ymm0
8615 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
8616 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8617 ; AVX2-FAST-NEXT: vmovaps %ymm1, 192(%rsi)
8618 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8619 ; AVX2-FAST-NEXT: vmovaps %ymm1, 128(%rsi)
8620 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8621 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%rsi)
8622 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8623 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%rsi)
8624 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8625 ; AVX2-FAST-NEXT: vmovaps %ymm1, 224(%rsi)
8626 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8627 ; AVX2-FAST-NEXT: vmovaps %ymm1, 160(%rsi)
8628 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8629 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%rsi)
8630 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8631 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rsi)
8632 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8633 ; AVX2-FAST-NEXT: vmovaps %ymm1, 192(%rdx)
8634 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8635 ; AVX2-FAST-NEXT: vmovaps %ymm1, 128(%rdx)
8636 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8637 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%rdx)
8638 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8639 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%rdx)
8640 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8641 ; AVX2-FAST-NEXT: vmovaps %ymm1, 224(%rdx)
8642 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8643 ; AVX2-FAST-NEXT: vmovaps %ymm1, 160(%rdx)
8644 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8645 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%rdx)
8646 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8647 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rdx)
8648 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8649 ; AVX2-FAST-NEXT: vmovaps %ymm1, 192(%rcx)
8650 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8651 ; AVX2-FAST-NEXT: vmovaps %ymm1, 128(%rcx)
8652 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8653 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%rcx)
8654 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8655 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%rcx)
8656 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8657 ; AVX2-FAST-NEXT: vmovaps %ymm1, 224(%rcx)
8658 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8659 ; AVX2-FAST-NEXT: vmovaps %ymm1, 160(%rcx)
8660 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8661 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%rcx)
8662 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8663 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rcx)
8664 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8665 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%r8)
8666 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8667 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%r8)
8668 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8669 ; AVX2-FAST-NEXT: vmovaps %ymm1, 128(%r8)
8670 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8671 ; AVX2-FAST-NEXT: vmovaps %ymm1, 192(%r8)
8672 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8673 ; AVX2-FAST-NEXT: vmovaps %ymm1, 224(%r8)
8674 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8675 ; AVX2-FAST-NEXT: vmovaps %ymm1, 160(%r8)
8676 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8677 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%r8)
8678 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8679 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%r8)
8680 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8681 ; AVX2-FAST-NEXT: vmovaps %ymm1, 224(%r9)
8682 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8683 ; AVX2-FAST-NEXT: vmovaps %ymm1, 192(%r9)
8684 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8685 ; AVX2-FAST-NEXT: vmovaps %ymm1, 160(%r9)
8686 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8687 ; AVX2-FAST-NEXT: vmovaps %ymm1, 128(%r9)
8688 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8689 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%r9)
8690 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8691 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%r9)
8692 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8693 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%r9)
8694 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8695 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%r9)
8696 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
8697 ; AVX2-FAST-NEXT: vmovaps %ymm6, 224(%rax)
8698 ; AVX2-FAST-NEXT: vmovaps %ymm9, 192(%rax)
8699 ; AVX2-FAST-NEXT: vmovaps %ymm13, 160(%rax)
8700 ; AVX2-FAST-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
8701 ; AVX2-FAST-NEXT: vmovaps %ymm1, 128(%rax)
8702 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8703 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%rax)
8704 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8705 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%rax)
8706 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8707 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rax)
8708 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
8709 ; AVX2-FAST-NEXT: addq $2504, %rsp # imm = 0x9C8
8710 ; AVX2-FAST-NEXT: vzeroupper
8711 ; AVX2-FAST-NEXT: retq
;
8713 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride6_vf64:
8714 ; AVX2-FAST-PERLANE: # %bb.0:
8715 ; AVX2-FAST-PERLANE-NEXT: subq $2504, %rsp # imm = 0x9C8
8716 ; AVX2-FAST-PERLANE-NEXT: vmovaps 672(%rdi), %ymm2
8717 ; AVX2-FAST-PERLANE-NEXT: vmovaps 640(%rdi), %ymm3
8718 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8719 ; AVX2-FAST-PERLANE-NEXT: vmovaps 608(%rdi), %ymm4
8720 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8721 ; AVX2-FAST-PERLANE-NEXT: vmovaps 320(%rdi), %ymm5
8722 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm5, (%rsp) # 32-byte Spill
8723 ; AVX2-FAST-PERLANE-NEXT: vmovaps 352(%rdi), %ymm6
8724 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8725 ; AVX2-FAST-PERLANE-NEXT: vmovaps 288(%rdi), %ymm7
8726 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8727 ; AVX2-FAST-PERLANE-NEXT: vmovaps 256(%rdi), %ymm9
8728 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8729 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %ymm0
8730 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8731 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %ymm1
8732 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8733 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm8 = <0,6,4,u>
8734 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
8735 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm14, %ymm8, %ymm0
8736 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm9[0,1],ymm7[0,1]
8737 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
8738 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8739 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,2,2,4,6,6,6]
8740 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8741 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm5[4,5,6,7]
8742 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8743 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm5 = [4,2,4,2,4,2,4,2]
8744 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm5, %ymm1
8745 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8746 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8747 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8748 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[0,1],ymm2[0,1]
8749 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm2[6,7]
8750 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8751 ; AVX2-FAST-PERLANE-NEXT: vmovaps 576(%rdi), %ymm0
8752 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8753 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
8754 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8755 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm8, %ymm0
8756 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
8757 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8758 ; AVX2-FAST-PERLANE-NEXT: vmovaps 704(%rdi), %ymm1
8759 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8760 ; AVX2-FAST-PERLANE-NEXT: vmovaps 736(%rdi), %ymm2
8761 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8762 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
8763 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8764 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm5, %ymm1
8765 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8766 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8767 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1056(%rdi), %ymm1
8768 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8769 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1024(%rdi), %ymm0
8770 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8771 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
8772 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8773 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8774 ; AVX2-FAST-PERLANE-NEXT: vmovaps 992(%rdi), %ymm0
8775 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8776 ; AVX2-FAST-PERLANE-NEXT: vmovaps 960(%rdi), %ymm1
8777 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8778 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
8779 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8780 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm8, %ymm0
8781 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
8782 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8783 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1088(%rdi), %ymm1
8784 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8785 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1120(%rdi), %ymm2
8786 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8787 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
8788 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8789 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm5, %ymm1
8790 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8791 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8792 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1440(%rdi), %ymm1
8793 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8794 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1408(%rdi), %ymm0
8795 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8796 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
8797 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8798 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8799 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1376(%rdi), %ymm0
8800 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8801 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1344(%rdi), %ymm1
8802 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8803 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
8804 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8805 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm8, %ymm0
8806 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
8807 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8808 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1472(%rdi), %ymm1
8809 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8810 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1504(%rdi), %ymm2
8811 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8812 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
8813 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8814 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm5, %ymm1
8815 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8816 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8817 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm1
8818 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8819 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm0
8820 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8821 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
8822 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8823 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
8824 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8825 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm1
8826 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8827 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
8828 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm12, %ymm8, %ymm0
8829 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm15[0,2,2,2,4,6,6,6]
8830 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8831 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm1
8832 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8833 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm2
8834 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8835 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm1[4,5,6,7]
8836 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm5, %ymm1
8837 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8838 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8839 ; AVX2-FAST-PERLANE-NEXT: vmovaps 480(%rdi), %ymm1
8840 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8841 ; AVX2-FAST-PERLANE-NEXT: vmovaps 448(%rdi), %ymm0
8842 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8843 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
8844 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8845 ; AVX2-FAST-PERLANE-NEXT: vmovaps 416(%rdi), %ymm0
8846 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8847 ; AVX2-FAST-PERLANE-NEXT: vmovaps 384(%rdi), %ymm1
8848 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8849 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
8850 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm8, %ymm0
8851 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,2,2,2,4,6,6,6]
8852 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8853 ; AVX2-FAST-PERLANE-NEXT: vmovaps 512(%rdi), %ymm1
8854 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8855 ; AVX2-FAST-PERLANE-NEXT: vmovaps 544(%rdi), %ymm2
8856 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8857 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm1[4,5,6,7]
8858 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm7, %ymm5, %ymm1
8859 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8860 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8861 ; AVX2-FAST-PERLANE-NEXT: vmovaps 864(%rdi), %ymm1
8862 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8863 ; AVX2-FAST-PERLANE-NEXT: vmovaps 832(%rdi), %ymm0
8864 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8865 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
8866 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8867 ; AVX2-FAST-PERLANE-NEXT: vmovaps 800(%rdi), %ymm0
8868 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8869 ; AVX2-FAST-PERLANE-NEXT: vmovaps 768(%rdi), %ymm1
8870 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8871 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
8872 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm8, %ymm0
8873 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,2,2,2,4,6,6,6]
8874 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8875 ; AVX2-FAST-PERLANE-NEXT: vmovaps 896(%rdi), %ymm1
8876 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8877 ; AVX2-FAST-PERLANE-NEXT: vmovaps 928(%rdi), %ymm2
8878 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8879 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm1[4,5,6,7]
8880 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm3, %ymm5, %ymm1
8881 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8882 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8883 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1184(%rdi), %ymm0
8884 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8885 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1152(%rdi), %ymm1
8886 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8887 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
8888 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm8, %ymm0
8889 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1248(%rdi), %ymm8
8890 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8891 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1216(%rdi), %ymm1
8892 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8893 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm8[0,1]
8894 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3,4,5],ymm8[6,7]
8895 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm8[0,2,2,2,4,6,6,6]
8896 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8897 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1280(%rdi), %ymm1
8898 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8899 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1312(%rdi), %ymm13
8900 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8901 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5,6,7]
8902 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm5, %ymm5
8903 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
8904 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8905 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm5 = <1,7,5,u>
8906 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm14, %ymm5, %ymm0
8907 ; AVX2-FAST-PERLANE-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
8908 ; AVX2-FAST-PERLANE-NEXT: # ymm13 = mem[1,3,2,3,5,7,6,7]
8909 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7]
8910 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm0 = [5,3,5,3,5,3,5,3]
8911 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
8912 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
8913 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8914 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
8915 ; AVX2-FAST-PERLANE-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
8916 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = mem[1,3,2,3,5,7,6,7]
8917 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
8918 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
8919 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
8920 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8921 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
8922 ; AVX2-FAST-PERLANE-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
8923 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = mem[1,3,2,3,5,7,6,7]
8924 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
8925 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
8926 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
8927 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8928 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
8929 ; AVX2-FAST-PERLANE-NEXT: vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
8930 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = mem[1,3,2,3,5,7,6,7]
8931 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
8932 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
8933 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
8934 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8935 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm12, %ymm5, %ymm12
8936 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm13 = ymm15[1,3,2,3,5,7,6,7]
8937 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm13[3,4,5,6,7]
8938 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm0, %ymm11
8939 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3,4,5],ymm11[6,7]
8940 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8941 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm5, %ymm9
8942 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,3,2,3,5,7,6,7]
8943 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4,5,6,7]
8944 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm7, %ymm0, %ymm7
8945 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3,4,5],ymm7[6,7]
8946 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8947 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm5, %ymm4
8948 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,3,2,3,5,7,6,7]
8949 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5,6,7]
8950 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm3, %ymm0, %ymm3
8951 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
8952 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8953 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm5, %ymm2
8954 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm8[1,3,2,3,5,7,6,7]
8955 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
8956 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm0, %ymm0
8957 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
8958 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8959 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %xmm4
8960 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm4[2,3,2,3]
8961 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
8962 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,2,3,4,4,6,7]
8963 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
8964 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8965 ; AVX2-FAST-PERLANE-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8966 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
8967 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
8968 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
8969 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8970 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8971 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8972 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
8973 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, (%rsp), %ymm2 # 32-byte Folded Reload
8974 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,0,2,3,4,4,6,7]
8975 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
8976 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
8977 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
8978 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8979 ; AVX2-FAST-PERLANE-NEXT: vmovaps 576(%rdi), %xmm0
8980 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8981 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8982 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
8983 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,2,3,4,4,6,7]
8984 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
8985 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8986 ; AVX2-FAST-PERLANE-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8987 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
8988 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
8989 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
8990 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
8991 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8992 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
8993 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
8994 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
8995 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,0,2,3,4,4,6,7]
8996 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
8997 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
8998 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
8999 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9000 ; AVX2-FAST-PERLANE-NEXT: vmovaps 960(%rdi), %xmm2
9001 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm2[2,3,2,3]
9002 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9003 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0,2,3,4,4,6,7]
9004 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
9005 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9006 ; AVX2-FAST-PERLANE-NEXT: vblendps $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9007 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[0,1],mem[2,3],ymm1[4,5],mem[6,7]
9008 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
9009 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
9010 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
9011 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
9012 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9013 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9014 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
9015 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
9016 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3],ymm5[4,5,6],ymm1[7]
9017 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
9018 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
9019 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9020 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1344(%rdi), %xmm1
9021 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm1[2,3,2,3]
9022 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
9023 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
9024 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3],ymm0[4],ymm5[5,6,7]
9025 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
9026 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
9027 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm13[2,3],ymm5[4,5],ymm13[6,7]
9028 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
9029 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,0,3]
9030 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
9031 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6,7]
9032 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
9033 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm14[0,0,0,0,4,4,4,4]
9034 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
9035 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm15[0,0,2,3,4,4,6,7]
9036 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5,6],ymm5[7]
9037 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
9038 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
9039 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9040 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm0
9041 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9042 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
9043 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
9044 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,0,2,3,4,4,6,7]
9045 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3],ymm0[4],ymm5[5,6,7]
9046 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
9047 ; AVX2-FAST-PERLANE-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
9048 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,1],ymm5[2,3],mem[4,5],ymm5[6,7]
9049 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
9050 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,0,3]
9051 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
9052 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6,7]
9053 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
9054 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,0,0,0,4,4,4,4]
9055 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
9056 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,0,2,3,4,4,6,7]
9057 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5,6],ymm5[7]
9058 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
9059 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
9060 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9061 ; AVX2-FAST-PERLANE-NEXT: vmovaps 384(%rdi), %xmm0
9062 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9063 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
9064 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
9065 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,0,2,3,4,4,6,7]
9066 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm6[1,2,3],ymm0[4],ymm6[5,6,7]
9067 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
9068 ; AVX2-FAST-PERLANE-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm6 # 32-byte Folded Reload
9069 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,1],ymm5[2,3],mem[4,5],ymm5[6,7]
9070 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7]
9071 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,2,0,3]
9072 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
9073 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3,4,5,6,7]
9074 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
9075 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,0,0,0,4,4,4,4]
9076 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
9077 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,0,2,3,4,4,6,7]
9078 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3],ymm7[4,5,6],ymm6[7]
9079 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
9080 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5,6,7]
9081 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9082 ; AVX2-FAST-PERLANE-NEXT: vmovaps 768(%rdi), %xmm0
9083 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9084 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm0[2,3,2,3]
9085 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
9086 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,0,2,3,4,4,6,7]
9087 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3],ymm6[4],ymm7[5,6,7]
9088 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9089 ; AVX2-FAST-PERLANE-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
9090 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
9091 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,0,2,3,6,4,6,7]
9092 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,0,3]
9093 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,3,2,3]
9094 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4,5,6,7]
9095 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
9096 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,0,0,0,4,4,4,4]
9097 ; AVX2-FAST-PERLANE-NEXT: vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
9098 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0,0,2,3,4,4,6,7]
9099 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3],ymm8[4,5,6],ymm7[7]
9100 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
9101 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm7[5,6,7]
9102 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9103 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1152(%rdi), %xmm12
9104 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm12[2,3,2,3]
9105 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
9106 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm9[0,0,2,3,4,4,6,7]
9107 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3],ymm7[4],ymm8[5,6,7]
9108 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9109 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
9110 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm0[2,3],ymm11[4,5],ymm0[6,7]
9111 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0,2,3,6,4,6,7]
9112 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,2,0,3]
9113 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,3,2,3]
9114 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5,6,7]
9115 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
9116 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm6[0,0,0,0,4,4,4,4]
9117 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
9118 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm5[0,0,2,3,4,4,6,7]
9119 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3],ymm10[4,5,6],ymm8[7]
9120 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,3]
9121 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5,6,7]
9122 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9123 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,3,3,3]
9124 ; AVX2-FAST-PERLANE-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
9125 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,1,3,3,4,5,7,7]
9126 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm7[1,2,3],ymm4[4],ymm7[5,6,7]
9127 ; AVX2-FAST-PERLANE-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
9128 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[3,3,3,3,7,7,7,7]
9129 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
9130 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = ymm7[0],mem[1],ymm7[2,3,4],mem[5],ymm7[6,7]
9131 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,0,3]
9132 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,3,2,3]
9133 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm7[3,4,5,6,7]
9134 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm3[0,1,0,1,4,5,4,5]
9135 ; AVX2-FAST-PERLANE-NEXT: vpermilps $244, (%rsp), %ymm8 # 32-byte Folded Reload
9136 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0,1,3,3,4,5,7,7]
9137 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3],ymm8[4,5,6],ymm7[7]
9138 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
9139 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm7[5,6,7]
9140 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9141 ; AVX2-FAST-PERLANE-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
9142 ; AVX2-FAST-PERLANE-NEXT: # xmm3 = mem[3,3,3,3]
9143 ; AVX2-FAST-PERLANE-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
9144 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,1,3,3,4,5,7,7]
9145 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3],ymm3[4],ymm4[5,6,7]
9146 ; AVX2-FAST-PERLANE-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
9147 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[3,3,3,3,7,7,7,7]
9148 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
9149 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
9150 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,0,3]
9151 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,3,2,3]
9152 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5,6,7]
9153 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
9154 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,1,0,1,4,5,4,5]
9155 ; AVX2-FAST-PERLANE-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
9156 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,1,3,3,4,5,7,7]
9157 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2],ymm4[3],ymm7[4,5,6],ymm4[7]
9158 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
9159 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6,7]
9160 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9161 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
9162 ; AVX2-FAST-PERLANE-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
9163 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,1,3,3,4,5,7,7]
9164 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3],ymm2[4],ymm3[5,6,7]
9165 ; AVX2-FAST-PERLANE-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
9166 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[3,3,3,3,7,7,7,7]
9167 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
9168 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
9169 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,2,0,3]
9170 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,3,2,3]
9171 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
9172 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
9173 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,1,0,1,4,5,4,5]
9174 ; AVX2-FAST-PERLANE-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
9175 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,1,3,3,4,5,7,7]
9176 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3],ymm4[4,5,6],ymm3[7]
9177 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
9178 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
9179 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9180 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
9181 ; AVX2-FAST-PERLANE-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9182 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,3,3,4,5,7,7]
9183 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
9184 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm13[3,3,3,3,7,7,7,7]
9185 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
9186 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4],mem[5],ymm2[6,7]
9187 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
9188 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
9189 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
9190 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm14[0,1,0,1,4,5,4,5]
9191 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm15[0,1,3,3,4,5,7,7]
9192 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
9193 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
9194 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
9195 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9196 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm12[3,3,3,3]
9197 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm9[0,1,3,3,4,5,7,7]
9198 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
9199 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,3,3,3,7,7,7,7]
9200 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm11[1],ymm2[2,3,4],ymm11[5],ymm2[6,7]
9201 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,0,3]
9202 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
9203 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
9204 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,1,0,1,4,5,4,5]
9205 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm5[0,1,3,3,4,5,7,7]
9206 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
9207 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
9208 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm2[5,6,7]
9209 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9210 ; AVX2-FAST-PERLANE-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9211 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = mem[3,3,3,3]
9212 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
9213 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,1,3,3,4,5,7,7]
9214 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
9215 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
9216 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm13[3,3,3,3,7,7,7,7]
9217 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
9218 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4],ymm11[5],ymm1[6,7]
9219 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
9220 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
9221 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
9222 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
9223 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
9224 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
9225 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,1,3,3,4,5,7,7]
9226 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
9227 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
9228 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
9229 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9230 ; AVX2-FAST-PERLANE-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9231 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = mem[3,3,3,3]
9232 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
9233 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,1,3,3,4,5,7,7]
9234 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
9235 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
9236 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
9237 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
9238 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
9239 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
9240 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
9241 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
9242 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
9243 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm9[0,1,0,1,4,5,4,5]
9244 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
9245 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm8[0,1,3,3,4,5,7,7]
9246 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
9247 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
9248 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
9249 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9250 ; AVX2-FAST-PERLANE-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
9251 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = mem[3,3,3,3]
9252 ; AVX2-FAST-PERLANE-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9253 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1,3,3,4,5,7,7]
9254 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
9255 ; AVX2-FAST-PERLANE-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9256 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[3,3,3,3,7,7,7,7]
9257 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
9258 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
9259 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
9260 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
9261 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
9262 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9263 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1,0,1,4,5,4,5]
9264 ; AVX2-FAST-PERLANE-NEXT: vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9265 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,3,3,4,5,7,7]
9266 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
9267 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
9268 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
9269 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9270 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9271 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
9272 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm0[0,1,2,3],mem[4,5,6,7]
9273 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9274 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9275 ; AVX2-FAST-PERLANE-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
9276 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1,2,3],ymm0[4,5,6,7]
9277 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9278 ; AVX2-FAST-PERLANE-NEXT: vmovaps 272(%rdi), %xmm0
9279 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9280 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
9281 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
9282 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm1 = [4,2,4,2]
9283 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[0,0]
9284 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm1, %ymm2
9285 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm0[2,3,4,5,6,7]
9286 ; AVX2-FAST-PERLANE-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
9287 ; AVX2-FAST-PERLANE-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9288 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
9289 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
9290 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [0,2,0,6,0,2,0,6]
9291 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,1,0,1]
9292 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm5, %ymm3
9293 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
9294 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9295 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm3 # 32-byte Folded Reload
9296 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm4[0,1,2,3],mem[4,5,6,7]
9297 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9298 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm7[4,5,6,7]
9299 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9300 ; AVX2-FAST-PERLANE-NEXT: vmovaps 464(%rdi), %xmm0
9301 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9302 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
9303 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
9304 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm3, %ymm1, %ymm3
9305 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
9306 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
9307 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9308 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm5, %ymm3
9309 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
9310 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9311 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9312 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
9313 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm2[0,1,2,3],mem[4,5,6,7]
9314 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9315 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9316 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
9317 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm2[0,1,2,3],mem[4,5,6,7]
9318 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9319 ; AVX2-FAST-PERLANE-NEXT: vmovaps 656(%rdi), %xmm2
9320 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9321 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
9322 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
9323 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm1, %ymm3
9324 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
9325 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
9326 ; AVX2-FAST-PERLANE-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm0 # 32-byte Folded Reload
9327 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
9328 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9329 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm5, %ymm3
9330 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
9331 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9332 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
9333 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm10[0,1,2,3],mem[4,5,6,7]
9334 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9335 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm13[4,5,6,7]
9336 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9337 ; AVX2-FAST-PERLANE-NEXT: vmovaps 848(%rdi), %xmm2
9338 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9339 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
9340 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
9341 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm1, %ymm3
9342 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
9343 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5,6,7]
9344 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9345 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm5, %ymm3
9346 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
9347 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9348 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9349 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
9350 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm2[0,1,2,3],mem[4,5,6,7]
9351 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9352 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9353 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
9354 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm2[0,1,2,3],mem[4,5,6,7]
9355 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9356 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1040(%rdi), %xmm2
9357 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9358 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
9359 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
9360 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm1, %ymm3
9361 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
9362 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
9363 ; AVX2-FAST-PERLANE-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm13 # 32-byte Folded Reload
9364 ; AVX2-FAST-PERLANE-NEXT: # ymm13 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
9365 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm13, %ymm5, %ymm3
9366 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
9367 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9368 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9369 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
9370 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm2[0,1,2,3],mem[4,5,6,7]
9371 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9372 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9373 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
9374 ; AVX2-FAST-PERLANE-NEXT: # ymm12 = ymm2[0,1,2,3],mem[4,5,6,7]
9375 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1232(%rdi), %xmm11
9376 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm12[0,1],ymm11[2,3],ymm12[4,5,6,7]
9377 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
9378 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm1, %ymm3
9379 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
9380 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
9381 ; AVX2-FAST-PERLANE-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm9 # 32-byte Folded Reload
9382 ; AVX2-FAST-PERLANE-NEXT: # ymm9 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
9383 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm5, %ymm3
9384 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
9385 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9386 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9387 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
9388 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm2[0,1,2,3],mem[4,5,6,7]
9389 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9390 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9391 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload
9392 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = ymm2[0,1,2,3],mem[4,5,6,7]
9393 ; AVX2-FAST-PERLANE-NEXT: vmovaps 1424(%rdi), %xmm8
9394 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
9395 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
9396 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm1, %ymm3
9397 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
9398 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
9399 ; AVX2-FAST-PERLANE-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm6 # 32-byte Folded Reload
9400 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
9401 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm5, %ymm3
9402 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
9403 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9404 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm10 # 32-byte Folded Reload
9405 ; AVX2-FAST-PERLANE-NEXT: # ymm10 = ymm15[0,1,2,3],mem[4,5,6,7]
9406 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9407 ; AVX2-FAST-PERLANE-NEXT: vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
9408 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = ymm2[0,1,2,3],mem[4,5,6,7]
9409 ; AVX2-FAST-PERLANE-NEXT: vmovaps 80(%rdi), %xmm3
9410 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm3[2,3],ymm10[4,5,6,7]
9411 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
9412 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm1, %ymm1
9413 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
9414 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9415 ; AVX2-FAST-PERLANE-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
9416 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1],ymm2[2,3],mem[4,5,6,7]
9417 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm5, %ymm0
9418 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
9419 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9420 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9421 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9422 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9423 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
9424 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm1 = [5,3,5,3]
9425 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[0,0]
9426 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
9427 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm0[2,3,4,5,6,7]
9428 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,3,1,7,0,3,1,7]
9429 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[0,1,0,1]
9430 ; AVX2-FAST-PERLANE-NEXT: vpermps (%rsp), %ymm0, %ymm15 # 32-byte Folded Reload
9431 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
9432 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9433 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
9434 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
9435 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
9436 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
9437 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
9438 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
9439 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
9440 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
9441 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9442 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
9443 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
9444 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
9445 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
9446 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
9447 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
9448 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
9449 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
9450 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9451 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
9452 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
9453 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
9454 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
9455 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
9456 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
9457 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
9458 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm15[5,6,7]
9459 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9460 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
9461 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = mem[1,1,1,1,5,5,5,5]
9462 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
9463 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
9464 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
9465 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
9466 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm13, %ymm0, %ymm13
9467 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
9468 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm12 = ymm12[1,1,1,1,5,5,5,5]
9469 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3],ymm12[4,5,6,7]
9470 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
9471 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3,4,5,6,7]
9472 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm0, %ymm9
9473 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1,2,3,4],ymm9[5,6,7]
9474 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,1,1,1,5,5,5,5]
9475 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6,7]
9476 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
9477 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
9478 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm0, %ymm6
9479 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
9480 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm10[1,1,1,1,5,5,5,5]
9481 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3],ymm5[4,5,6,7]
9482 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm1, %ymm1
9483 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
9484 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm0, %ymm0
9485 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
9486 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9487 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 192(%rsi)
9488 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9489 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 128(%rsi)
9490 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9491 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%rsi)
9492 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9493 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rsi)
9494 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9495 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 224(%rsi)
9496 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9497 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 160(%rsi)
9498 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9499 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 96(%rsi)
9500 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9501 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%rsi)
9502 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9503 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 192(%rdx)
9504 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9505 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 128(%rdx)
9506 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9507 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%rdx)
9508 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9509 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rdx)
9510 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9511 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 224(%rdx)
9512 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9513 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 160(%rdx)
9514 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9515 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 96(%rdx)
9516 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9517 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%rdx)
9518 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9519 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 192(%rcx)
9520 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9521 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 128(%rcx)
9522 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9523 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%rcx)
9524 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9525 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rcx)
9526 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9527 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 224(%rcx)
9528 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9529 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 160(%rcx)
9530 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9531 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 96(%rcx)
9532 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9533 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%rcx)
9534 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9535 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%r8)
9536 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9537 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%r8)
9538 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9539 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 128(%r8)
9540 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9541 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 192(%r8)
9542 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9543 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 224(%r8)
9544 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9545 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 160(%r8)
9546 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9547 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 96(%r8)
9548 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9549 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%r8)
9550 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9551 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 224(%r9)
9552 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9553 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 192(%r9)
9554 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9555 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 160(%r9)
9556 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9557 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 128(%r9)
9558 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9559 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 96(%r9)
9560 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9561 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%r9)
9562 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9563 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%r9)
9564 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9565 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%r9)
9566 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
9567 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 224(%rax)
9568 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 192(%rax)
9569 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm13, 160(%rax)
9570 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9571 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 128(%rax)
9572 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9573 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 96(%rax)
9574 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9575 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%rax)
9576 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9577 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%rax)
9578 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
9579 ; AVX2-FAST-PERLANE-NEXT: addq $2504, %rsp # imm = 0x9C8
9580 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
9581 ; AVX2-FAST-PERLANE-NEXT: retq
9582 ;
9583 ; AVX512F-LABEL: load_i32_stride6_vf64:
9584 ; AVX512F: # %bb.0:
9585 ; AVX512F-NEXT: subq $2632, %rsp # imm = 0xA48
9586 ; AVX512F-NEXT: vmovdqa64 1472(%rdi), %zmm21
9587 ; AVX512F-NEXT: vmovdqa64 1408(%rdi), %zmm1
9588 ; AVX512F-NEXT: vmovdqa64 1088(%rdi), %zmm20
9589 ; AVX512F-NEXT: vmovdqa64 1024(%rdi), %zmm0
9590 ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm2
9591 ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm18
9592 ; AVX512F-NEXT: vmovdqa64 704(%rdi), %zmm27
9593 ; AVX512F-NEXT: vmovdqa64 640(%rdi), %zmm3
9594 ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm25
9595 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm4
9596 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,6,12,0,0,0,20,26,0,6,12,0,0,0,20,26]
9597 ; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
9598 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
9599 ; AVX512F-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
9600 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm7
9601 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm6, %zmm7
9602 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9603 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm7
9604 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm6, %zmm7
9605 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9606 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm7
9607 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm5, %zmm7
9608 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9609 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm7
9610 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm6, %zmm7
9611 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9612 ; AVX512F-NEXT: vpermi2d %zmm21, %zmm1, %zmm6
9613 ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9614 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
9615 ; AVX512F-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
9616 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm8
9617 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm6
9618 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm7, %zmm8
9619 ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9620 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
9621 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
9622 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm0
9623 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm7, %zmm0
9624 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9625 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm0
9626 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm7, %zmm0
9627 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9628 ; AVX512F-NEXT: vpermi2d %zmm21, %zmm1, %zmm7
9629 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9630 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
9631 ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
9632 ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm7
9633 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm0, %zmm7
9634 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9635 ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm7
9636 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm0, %zmm7
9637 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9638 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm7
9639 ; AVX512F-NEXT: vpermt2d %zmm4, %zmm0, %zmm7
9640 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9641 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm21, %zmm0
9642 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9643 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
9644 ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
9645 ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm7
9646 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm0, %zmm7
9647 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9648 ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm7
9649 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm0, %zmm7
9650 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9651 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm7
9652 ; AVX512F-NEXT: vpermt2d %zmm4, %zmm0, %zmm7
9653 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9654 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm21, %zmm0
9655 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9656 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
9657 ; AVX512F-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
9658 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm0
9659 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm7, %zmm0
9660 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9661 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
9662 ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
9663 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm0, %zmm3
9664 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9665 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm3
9666 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm7, %zmm3
9667 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9668 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm0, %zmm4
9669 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9670 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm3
9671 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm7, %zmm3
9672 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9673 ; AVX512F-NEXT: vpermi2d %zmm21, %zmm1, %zmm7
9674 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9675 ; AVX512F-NEXT: vpermt2d %zmm21, %zmm0, %zmm1
9676 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9677 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm1
9678 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm8, %zmm1
9679 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9680 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm0, %zmm6
9681 ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9682 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [18,24,30,0,0,0,6,12,18,24,30,0,0,0,6,12]
9683 ; AVX512F-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
9684 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
9685 ; AVX512F-NEXT: vpermt2d %zmm18, %zmm31, %zmm0
9686 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9687 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [19,25,31,0,0,1,7,13,19,25,31,0,0,1,7,13]
9688 ; AVX512F-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
9689 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
9690 ; AVX512F-NEXT: vpermt2d %zmm18, %zmm25, %zmm0
9691 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9692 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
9693 ; AVX512F-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
9694 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
9695 ; AVX512F-NEXT: vpermt2d %zmm18, %zmm3, %zmm0
9696 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9697 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [21,27,0,0,0,3,9,15,21,27,0,0,0,3,9,15]
9698 ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
9699 ; AVX512F-NEXT: vpermt2d %zmm18, %zmm0, %zmm2
9700 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9701 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm26
9702 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm1
9703 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm2
9704 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm5, %zmm2
9705 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9706 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm2
9707 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm8, %zmm2
9708 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9709 ; AVX512F-NEXT: vmovdqa64 %zmm26, %zmm2
9710 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm31, %zmm2
9711 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9712 ; AVX512F-NEXT: vmovdqa64 %zmm26, %zmm2
9713 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm25, %zmm2
9714 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9715 ; AVX512F-NEXT: vmovdqa64 %zmm26, %zmm2
9716 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm3, %zmm2
9717 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9718 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm0, %zmm26
9719 ; AVX512F-NEXT: vmovdqa64 896(%rdi), %zmm22
9720 ; AVX512F-NEXT: vmovdqa64 960(%rdi), %zmm1
9721 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm2
9722 ; AVX512F-NEXT: vpermt2d %zmm22, %zmm5, %zmm2
9723 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9724 ; AVX512F-NEXT: vmovdqa64 1280(%rdi), %zmm19
9725 ; AVX512F-NEXT: vmovdqa64 1344(%rdi), %zmm2
9726 ; AVX512F-NEXT: vpermi2d %zmm19, %zmm2, %zmm5
9727 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9728 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm4
9729 ; AVX512F-NEXT: vpermt2d %zmm22, %zmm8, %zmm4
9730 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9731 ; AVX512F-NEXT: vpermi2d %zmm19, %zmm2, %zmm8
9732 ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9733 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm28
9734 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm31, %zmm28
9735 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm19, %zmm31
9736 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm29
9737 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm25, %zmm29
9738 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm19, %zmm25
9739 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm4
9740 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm3, %zmm4
9741 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9742 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm19, %zmm3
9743 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9744 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm0, %zmm19
9745 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm0, %zmm22
9746 ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm2
9747 ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm0
9748 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = <0,6,12,18,24,30,u,u>
9749 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm20
9750 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm3, %zmm20
9751 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = <1,7,13,19,25,31,u,u>
9752 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm21
9753 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm4, %zmm21
9754 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm12 = <2,8,14,20,26,u,u,u>
9755 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
9756 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm12, %zmm1
9757 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9758 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm14 = <3,9,15,21,27,u,u,u>
9759 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
9760 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm14, %zmm1
9761 ; AVX512F-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
9762 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = <20,26,0,6,12,u,u,u>
9763 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm23
9764 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm5, %zmm23
9765 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm24 = <21,27,1,7,13,u,u,u>
9766 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm24, %zmm2
9767 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
9768 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm6
9769 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm15
9770 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm3, %zmm15
9771 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm16
9772 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm4, %zmm16
9773 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm27
9774 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm12, %zmm27
9775 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm30
9776 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm14, %zmm30
9777 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm17
9778 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm5, %zmm17
9779 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm24, %zmm6
9780 ; AVX512F-NEXT: vmovdqa64 832(%rdi), %zmm10
9781 ; AVX512F-NEXT: vmovdqa64 768(%rdi), %zmm7
9782 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm8
9783 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm3, %zmm8
9784 ; AVX512F-NEXT: vmovdqa64 1216(%rdi), %zmm1
9785 ; AVX512F-NEXT: vmovdqa64 1152(%rdi), %zmm0
9786 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
9787 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm9
9788 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm4, %zmm9
9789 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
9790 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm13
9791 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm12, %zmm13
9792 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm12
9793 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm18
9794 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm14, %zmm18
9795 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm14
9796 ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm11
9797 ; AVX512F-NEXT: vpermt2d %zmm7, %zmm5, %zmm11
9798 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm1, %zmm5
9799 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm24, %zmm1
9800 ; AVX512F-NEXT: vpermt2d %zmm7, %zmm24, %zmm10
9801 ; AVX512F-NEXT: movb $56, %al
9802 ; AVX512F-NEXT: kmovw %eax, %k2
9803 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9804 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm15 {%k2}
9805 ; AVX512F-NEXT: movw $-2048, %ax # imm = 0xF800
9806 ; AVX512F-NEXT: kmovw %eax, %k1
9807 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9808 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
9809 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9810 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm20 {%k2}
9811 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9812 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm20 {%k1}
9813 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9814 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
9815 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9816 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
9817 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9818 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3 {%k2}
9819 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9820 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
9821 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9822 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm9 {%k2}
9823 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9824 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm9 {%k1}
9825 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9826 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm21 {%k2}
9827 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9828 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm21 {%k1}
9829 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9830 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm16 {%k2}
9831 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9832 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
9833 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9834 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm4 {%k2}
9835 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9836 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
9837 ; AVX512F-NEXT: movw $31, %ax
9838 ; AVX512F-NEXT: kmovw %eax, %k2
9839 ; AVX512F-NEXT: vmovdqa32 %zmm13, %zmm28 {%k2}
9840 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9841 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm28 {%k1}
9842 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
9843 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9844 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm7 {%k2}
9845 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9846 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
9847 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm13
9848 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
9849 ; AVX512F-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
9850 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9851 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
9852 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm24
9853 ; AVX512F-NEXT: vmovdqa32 %zmm12, %zmm31 {%k2}
9854 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9855 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm31 {%k1}
9856 ; AVX512F-NEXT: vmovdqa32 %zmm18, %zmm29 {%k2}
9857 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9858 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
9859 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
9860 ; AVX512F-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
9861 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm7 {%k2}
9862 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9863 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
9864 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
9865 ; AVX512F-NEXT: vmovdqa32 %zmm30, %zmm12 {%k2}
9866 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9867 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
9868 ; AVX512F-NEXT: vmovdqa32 %zmm14, %zmm25 {%k2}
9869 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9870 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm25 {%k1}
9871 ; AVX512F-NEXT: movw $992, %ax # imm = 0x3E0
9872 ; AVX512F-NEXT: kmovw %eax, %k1
9873 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9874 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
9875 ; AVX512F-NEXT: movb $-32, %al
9876 ; AVX512F-NEXT: kmovw %eax, %k2
9877 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9878 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm11 {%k2}
9879 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9880 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
9881 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9882 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm23 {%k2}
9883 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9884 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm17 {%k1}
9885 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9886 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm17 {%k2}
9887 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9888 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm5 {%k1}
9889 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9890 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm5 {%k2}
9891 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9892 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm2 {%k1}
9893 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9894 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
9895 ; AVX512F-NEXT: vmovdqa32 %zmm26, %zmm6 {%k1}
9896 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9897 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm6 {%k2}
9898 ; AVX512F-NEXT: vmovdqa32 %zmm19, %zmm1 {%k1}
9899 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9900 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2}
9901 ; AVX512F-NEXT: vmovdqa32 %zmm22, %zmm10 {%k1}
9902 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9903 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm10 {%k2}
9904 ; AVX512F-NEXT: vmovdqa64 %zmm3, 192(%rsi)
9905 ; AVX512F-NEXT: vmovdqa64 %zmm8, 128(%rsi)
9906 ; AVX512F-NEXT: vmovdqa64 %zmm20, 64(%rsi)
9907 ; AVX512F-NEXT: vmovdqa64 %zmm15, (%rsi)
9908 ; AVX512F-NEXT: vmovdqa64 %zmm4, 192(%rdx)
9909 ; AVX512F-NEXT: vmovdqa64 %zmm16, (%rdx)
9910 ; AVX512F-NEXT: vmovdqa64 %zmm21, 64(%rdx)
9911 ; AVX512F-NEXT: vmovdqa64 %zmm9, 128(%rdx)
9912 ; AVX512F-NEXT: vmovdqa64 %zmm31, 192(%rcx)
9913 ; AVX512F-NEXT: vmovdqa64 %zmm24, (%rcx)
9914 ; AVX512F-NEXT: vmovdqa64 %zmm13, 64(%rcx)
9915 ; AVX512F-NEXT: vmovdqa64 %zmm28, 128(%rcx)
9916 ; AVX512F-NEXT: vmovdqa64 %zmm25, 192(%r8)
9917 ; AVX512F-NEXT: vmovdqa64 %zmm12, (%r8)
9918 ; AVX512F-NEXT: vmovdqa64 %zmm7, 64(%r8)
9919 ; AVX512F-NEXT: vmovdqa64 %zmm29, 128(%r8)
9920 ; AVX512F-NEXT: vmovdqa64 %zmm5, 192(%r9)
9921 ; AVX512F-NEXT: vmovdqa64 %zmm17, (%r9)
9922 ; AVX512F-NEXT: vmovdqa64 %zmm23, 64(%r9)
9923 ; AVX512F-NEXT: vmovdqa64 %zmm11, 128(%r9)
9924 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
9925 ; AVX512F-NEXT: vmovdqa64 %zmm10, 128(%rax)
9926 ; AVX512F-NEXT: vmovdqa64 %zmm1, 192(%rax)
9927 ; AVX512F-NEXT: vmovdqa64 %zmm6, (%rax)
9928 ; AVX512F-NEXT: vmovdqa64 %zmm2, 64(%rax)
9929 ; AVX512F-NEXT: addq $2632, %rsp # imm = 0xA48
9930 ; AVX512F-NEXT: vzeroupper
9931 ; AVX512F-NEXT: retq
9932 ;
9933 ; AVX512BW-LABEL: load_i32_stride6_vf64:
9934 ; AVX512BW: # %bb.0:
9935 ; AVX512BW-NEXT: subq $2632, %rsp # imm = 0xA48
9936 ; AVX512BW-NEXT: vmovdqa64 1472(%rdi), %zmm21
9937 ; AVX512BW-NEXT: vmovdqa64 1408(%rdi), %zmm1
9938 ; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %zmm20
9939 ; AVX512BW-NEXT: vmovdqa64 1024(%rdi), %zmm0
9940 ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm2
9941 ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm18
9942 ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm27
9943 ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm3
9944 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm25
9945 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm4
9946 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,6,12,0,0,0,20,26,0,6,12,0,0,0,20,26]
9947 ; AVX512BW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
9948 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
9949 ; AVX512BW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
9950 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm7
9951 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm6, %zmm7
9952 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9953 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm7
9954 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm6, %zmm7
9955 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9956 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm7
9957 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm5, %zmm7
9958 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9959 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm7
9960 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm6, %zmm7
9961 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9962 ; AVX512BW-NEXT: vpermi2d %zmm21, %zmm1, %zmm6
9963 ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9964 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
9965 ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
9966 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm8
9967 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm6
9968 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm7, %zmm8
9969 ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9970 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
9971 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
9972 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm0
9973 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm7, %zmm0
9974 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9975 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm0
9976 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm7, %zmm0
9977 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9978 ; AVX512BW-NEXT: vpermi2d %zmm21, %zmm1, %zmm7
9979 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9980 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
9981 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
9982 ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm7
9983 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm7
9984 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9985 ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm7
9986 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm0, %zmm7
9987 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9988 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm7
9989 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm0, %zmm7
9990 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9991 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm21, %zmm0
9992 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9993 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
9994 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
9995 ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm7
9996 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm0, %zmm7
9997 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9998 ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm7
9999 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm0, %zmm7
10000 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10001 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm7
10002 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm0, %zmm7
10003 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10004 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm21, %zmm0
10005 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10006 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
10007 ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
10008 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm0
10009 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm7, %zmm0
10010 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10011 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
10012 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
10013 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm0, %zmm3
10014 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10015 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm3
10016 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm7, %zmm3
10017 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10018 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm0, %zmm4
10019 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10020 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm3
10021 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm7, %zmm3
10022 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10023 ; AVX512BW-NEXT: vpermi2d %zmm21, %zmm1, %zmm7
10024 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10025 ; AVX512BW-NEXT: vpermt2d %zmm21, %zmm0, %zmm1
10026 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10027 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm1
10028 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm8, %zmm1
10029 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10030 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm6
10031 ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10032 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [18,24,30,0,0,0,6,12,18,24,30,0,0,0,6,12]
10033 ; AVX512BW-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
10034 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
10035 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm31, %zmm0
10036 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10037 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [19,25,31,0,0,1,7,13,19,25,31,0,0,1,7,13]
10038 ; AVX512BW-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
10039 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
10040 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm25, %zmm0
10041 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10042 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
10043 ; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
10044 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
10045 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm3, %zmm0
10046 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10047 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [21,27,0,0,0,3,9,15,21,27,0,0,0,3,9,15]
10048 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
10049 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm0, %zmm2
10050 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10051 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm26
10052 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm1
10053 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm2
10054 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm5, %zmm2
10055 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10056 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm2
10057 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm8, %zmm2
10058 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10059 ; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm2
10060 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm31, %zmm2
10061 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10062 ; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm2
10063 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm25, %zmm2
10064 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10065 ; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm2
10066 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm3, %zmm2
10067 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10068 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm0, %zmm26
10069 ; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm22
10070 ; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm1
10071 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm2
10072 ; AVX512BW-NEXT: vpermt2d %zmm22, %zmm5, %zmm2
10073 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10074 ; AVX512BW-NEXT: vmovdqa64 1280(%rdi), %zmm19
10075 ; AVX512BW-NEXT: vmovdqa64 1344(%rdi), %zmm2
; AVX512BW-NEXT: vpermi2d %zmm19, %zmm2, %zmm5
; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm4
; AVX512BW-NEXT: vpermt2d %zmm22, %zmm8, %zmm4
; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT: vpermi2d %zmm19, %zmm2, %zmm8
; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm28
; AVX512BW-NEXT: vpermt2d %zmm1, %zmm31, %zmm28
; AVX512BW-NEXT: vpermi2d %zmm2, %zmm19, %zmm31
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm29
; AVX512BW-NEXT: vpermt2d %zmm1, %zmm25, %zmm29
; AVX512BW-NEXT: vpermi2d %zmm2, %zmm19, %zmm25
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm4
; AVX512BW-NEXT: vpermt2d %zmm1, %zmm3, %zmm4
; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT: vpermi2d %zmm2, %zmm19, %zmm3
; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm19
; AVX512BW-NEXT: vpermt2d %zmm1, %zmm0, %zmm22
; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = <0,6,12,18,24,30,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm20
; AVX512BW-NEXT: vpermt2d %zmm2, %zmm3, %zmm20
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm4 = <1,7,13,19,25,31,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm21
; AVX512BW-NEXT: vpermt2d %zmm2, %zmm4, %zmm21
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm12 = <2,8,14,20,26,u,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
; AVX512BW-NEXT: vpermt2d %zmm2, %zmm12, %zmm1
; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm14 = <3,9,15,21,27,u,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
; AVX512BW-NEXT: vpermt2d %zmm2, %zmm14, %zmm1
; AVX512BW-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm5 = <20,26,0,6,12,u,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm23
; AVX512BW-NEXT: vpermt2d %zmm0, %zmm5, %zmm23
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm24 = <21,27,1,7,13,u,u,u>
; AVX512BW-NEXT: vpermt2d %zmm0, %zmm24, %zmm2
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm6
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm15
; AVX512BW-NEXT: vpermt2d %zmm6, %zmm3, %zmm15
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm16
; AVX512BW-NEXT: vpermt2d %zmm6, %zmm4, %zmm16
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm27
; AVX512BW-NEXT: vpermt2d %zmm6, %zmm12, %zmm27
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm30
; AVX512BW-NEXT: vpermt2d %zmm6, %zmm14, %zmm30
; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm17
; AVX512BW-NEXT: vpermt2d %zmm0, %zmm5, %zmm17
; AVX512BW-NEXT: vpermt2d %zmm0, %zmm24, %zmm6
; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm10
; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm7
; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm8
; AVX512BW-NEXT: vpermt2d %zmm10, %zmm3, %zmm8
; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %zmm1
; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %zmm0
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm9
; AVX512BW-NEXT: vpermt2d %zmm10, %zmm4, %zmm9
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm13
; AVX512BW-NEXT: vpermt2d %zmm10, %zmm12, %zmm13
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm12
; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm18
; AVX512BW-NEXT: vpermt2d %zmm10, %zmm14, %zmm18
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm14
; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm11
; AVX512BW-NEXT: vpermt2d %zmm7, %zmm5, %zmm11
; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm5
; AVX512BW-NEXT: vpermt2d %zmm0, %zmm24, %zmm1
; AVX512BW-NEXT: vpermt2d %zmm7, %zmm24, %zmm10
; AVX512BW-NEXT: movb $56, %al
; AVX512BW-NEXT: kmovd %eax, %k2
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm15 {%k2}
; AVX512BW-NEXT: movw $-2048, %ax # imm = 0xF800
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm20 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm20 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm8 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm8 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm9 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm9 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm21 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm21 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm16 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm4 {%k1}
; AVX512BW-NEXT: movw $31, %ax
; AVX512BW-NEXT: kmovd %eax, %k2
; AVX512BW-NEXT: vmovdqa32 %zmm13, %zmm28 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm28 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm7 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm13
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm27, %zmm7 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm24
; AVX512BW-NEXT: vmovdqa32 %zmm12, %zmm31 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm31 {%k1}
; AVX512BW-NEXT: vmovdqa32 %zmm18, %zmm29 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
; AVX512BW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm7 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm7 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm30, %zmm12 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm12 {%k1}
; AVX512BW-NEXT: vmovdqa32 %zmm14, %zmm25 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm25 {%k1}
; AVX512BW-NEXT: movw $992, %ax # imm = 0x3E0
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm11 {%k1}
; AVX512BW-NEXT: movb $-32, %al
; AVX512BW-NEXT: kmovd %eax, %k2
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm11 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm23 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm17 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm17 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm5 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm5 {%k2}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2}
; AVX512BW-NEXT: vmovdqa32 %zmm26, %zmm6 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm6 {%k2}
; AVX512BW-NEXT: vmovdqa32 %zmm19, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512BW-NEXT: vmovdqa32 %zmm22, %zmm10 {%k1}
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm10 {%k2}
; AVX512BW-NEXT: vmovdqa64 %zmm3, 192(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm8, 128(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm20, 64(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm15, (%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm4, 192(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm16, (%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm21, 64(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm9, 128(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm31, 192(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm24, (%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm13, 64(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm28, 128(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm25, 192(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm12, (%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm7, 64(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm29, 128(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm5, 192(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm17, (%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm23, 64(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm11, 128(%r9)
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: vmovdqa64 %zmm10, 128(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm1, 192(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm6, (%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm2, 64(%rax)
; AVX512BW-NEXT: addq $2632, %rsp # imm = 0xA48
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %wide.vec = load <384 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <384 x i32> %wide.vec, <384 x i32> poison, <64 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186, i32 192, i32 198, i32 204, i32 210, i32 216, i32 222, i32 228, i32 234, i32 240, i32 246, i32 252, i32 258, i32 264, i32 270, i32 276, i32 282, i32 288, i32 294, i32 300, i32 306, i32 312, i32 318, i32 324, i32 330, i32 336, i32 342, i32 348, i32 354, i32 360, i32 366, i32 372, i32 378>
  %strided.vec1 = shufflevector <384 x i32> %wide.vec, <384 x i32> poison, <64 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187, i32 193, i32 199, i32 205, i32 211, i32 217, i32 223, i32 229, i32 235, i32 241, i32 247, i32 253, i32 259, i32 265, i32 271, i32 277, i32 283, i32 289, i32 295, i32 301, i32 307, i32 313, i32 319, i32 325, i32 331, i32 337, i32 343, i32 349, i32 355, i32 361, i32 367, i32 373, i32 379>
  %strided.vec2 = shufflevector <384 x i32> %wide.vec, <384 x i32> poison, <64 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92, i32 98, i32 104, i32 110, i32 116, i32 122, i32 128, i32 134, i32 140, i32 146, i32 152, i32 158, i32 164, i32 170, i32 176, i32 182, i32 188, i32 194, i32 200, i32 206, i32 212, i32 218, i32 224, i32 230, i32 236, i32 242, i32 248, i32 254, i32 260, i32 266, i32 272, i32 278, i32 284, i32 290, i32 296, i32 302, i32 308, i32 314, i32 320, i32 326, i32 332, i32 338, i32 344, i32 350, i32 356, i32 362, i32 368, i32 374, i32 380>
  %strided.vec3 = shufflevector <384 x i32> %wide.vec, <384 x i32> poison, <64 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93, i32 99, i32 105, i32 111, i32 117, i32 123, i32 129, i32 135, i32 141, i32 147, i32 153, i32 159, i32 165, i32 171, i32 177, i32 183, i32 189, i32 195, i32 201, i32 207, i32 213, i32 219, i32 225, i32 231, i32 237, i32 243, i32 249, i32 255, i32 261, i32 267, i32 273, i32 279, i32 285, i32 291, i32 297, i32 303, i32 309, i32 315, i32 321, i32 327, i32 333, i32 339, i32 345, i32 351, i32 357, i32 363, i32 369, i32 375, i32 381>
  %strided.vec4 = shufflevector <384 x i32> %wide.vec, <384 x i32> poison, <64 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94, i32 100, i32 106, i32 112, i32 118, i32 124, i32 130, i32 136, i32 142, i32 148, i32 154, i32 160, i32 166, i32 172, i32 178, i32 184, i32 190, i32 196, i32 202, i32 208, i32 214, i32 220, i32 226, i32 232, i32 238, i32 244, i32 250, i32 256, i32 262, i32 268, i32 274, i32 280, i32 286, i32 292, i32 298, i32 304, i32 310, i32 316, i32 322, i32 328, i32 334, i32 340, i32 346, i32 352, i32 358, i32 364, i32 370, i32 376, i32 382>
  %strided.vec5 = shufflevector <384 x i32> %wide.vec, <384 x i32> poison, <64 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95, i32 101, i32 107, i32 113, i32 119, i32 125, i32 131, i32 137, i32 143, i32 149, i32 155, i32 161, i32 167, i32 173, i32 179, i32 185, i32 191, i32 197, i32 203, i32 209, i32 215, i32 221, i32 227, i32 233, i32 239, i32 245, i32 251, i32 257, i32 263, i32 269, i32 275, i32 281, i32 287, i32 293, i32 299, i32 305, i32 311, i32 317, i32 323, i32 329, i32 335, i32 341, i32 347, i32 353, i32 359, i32 365, i32 371, i32 377, i32 383>
  store <64 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <64 x i32> %strided.vec1, ptr %out.vec1, align 64
  store <64 x i32> %strided.vec2, ptr %out.vec2, align 64
  store <64 x i32> %strided.vec3, ptr %out.vec3, align 64
  store <64 x i32> %strided.vec4, ptr %out.vec4, align 64
  store <64 x i32> %strided.vec5, ptr %out.vec5, align 64
  ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX512BW-FAST: {{.*}}
; AVX512BW-ONLY-FAST: {{.*}}
; AVX512BW-ONLY-SLOW: {{.*}}
; AVX512DQ-FAST: {{.*}}
; AVX512DQ-SLOW: {{.*}}
; AVX512DQBW-FAST: {{.*}}
; AVX512DQBW-SLOW: {{.*}}
; AVX512F-FAST: {{.*}}
; AVX512F-ONLY-FAST: {{.*}}
; AVX512F-ONLY-SLOW: {{.*}}
; AVX512F-SLOW: {{.*}}
; FALLBACK0: {{.*}}
; FALLBACK1: {{.*}}
; FALLBACK10: {{.*}}
; FALLBACK11: {{.*}}
; FALLBACK12: {{.*}}
; FALLBACK2: {{.*}}
; FALLBACK3: {{.*}}
; FALLBACK4: {{.*}}
; FALLBACK5: {{.*}}
; FALLBACK6: {{.*}}
; FALLBACK7: {{.*}}
; FALLBACK8: {{.*}}
; FALLBACK9: {{.*}}