1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by LoopVectorizer for interleaved stores.
18 define void @store_i32_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
19 ; SSE-LABEL: store_i32_stride7_vf2:
21 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
22 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10
23 ; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
24 ; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
25 ; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
26 ; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
27 ; SSE-NEXT: movq {{.*#+}} xmm4 = mem[0],zero
28 ; SSE-NEXT: movsd {{.*#+}} xmm5 = mem[0],zero
29 ; SSE-NEXT: movsd {{.*#+}} xmm6 = mem[0],zero
30 ; SSE-NEXT: movaps %xmm6, %xmm7
31 ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm5[0]
32 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,1,1]
33 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[1,3]
34 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
35 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
36 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
37 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
38 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm3[1,1]
39 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[2,0]
40 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,3,3,3]
41 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm4[0,2]
42 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
43 ; SSE-NEXT: movaps %xmm1, 32(%rax)
44 ; SSE-NEXT: movaps %xmm7, 16(%rax)
45 ; SSE-NEXT: movaps %xmm0, (%rax)
46 ; SSE-NEXT: movq %xmm2, 48(%rax)
49 ; AVX1-ONLY-LABEL: store_i32_stride7_vf2:
51 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
52 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r10
53 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
54 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
55 ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
56 ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
57 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
58 ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
59 ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm5 = mem[0],zero
60 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm5[0],xmm4[0]
61 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
62 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
63 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
64 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
65 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm1[3,0],ymm0[1,0],ymm1[7,4],ymm0[5,4]
66 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,2],ymm0[2,1],ymm6[4,6],ymm0[6,5]
67 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm2[12,13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10,11]
68 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,2],xmm4[3,3]
69 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
70 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[0,3],ymm1[4,6],ymm0[4,7]
71 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm1
72 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,u,0,2,u,u,u,5]
73 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm1[2,3],ymm2[4,6],ymm1[6,7]
74 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6],ymm1[7]
75 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
76 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm6, %xmm0
77 ; AVX1-ONLY-NEXT: vmovlps %xmm0, 48(%rax)
78 ; AVX1-ONLY-NEXT: vmovaps %xmm5, 32(%rax)
79 ; AVX1-ONLY-NEXT: vzeroupper
80 ; AVX1-ONLY-NEXT: retq
82 ; AVX2-SLOW-LABEL: store_i32_stride7_vf2:
84 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
85 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %r10
86 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
87 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
88 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
89 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
90 ; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
91 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
92 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
93 ; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
94 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
95 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
96 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
97 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
98 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
99 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,1,4,6,6,5]
100 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,1]
101 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm0[0,2,2,3,4,6,6,7]
102 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,2]
103 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6],ymm1[7]
104 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm3 = <3,5,7,u>
105 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm3, %ymm2
106 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [3,5,0,1,3,5,0,1]
107 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,1,0,1]
108 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm3, %ymm0
109 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[3]
110 ; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
111 ; AVX2-SLOW-NEXT: vmovlps %xmm0, 48(%rax)
112 ; AVX2-SLOW-NEXT: vmovaps %xmm2, 32(%rax)
113 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rax)
114 ; AVX2-SLOW-NEXT: vzeroupper
115 ; AVX2-SLOW-NEXT: retq
117 ; AVX2-FAST-LABEL: store_i32_stride7_vf2:
118 ; AVX2-FAST: # %bb.0:
119 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
120 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %r10
121 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
122 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
123 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
124 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
125 ; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
126 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
127 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
128 ; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
129 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
130 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
131 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
132 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
133 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
134 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm1 = <3,5,7,u>
135 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm1
136 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [3,5,0,1,3,5,0,1]
137 ; AVX2-FAST-NEXT: # ymm3 = mem[0,1,0,1]
138 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm3, %ymm3
139 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[3]
140 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm4 = <0,2,4,6,u,u,u,1>
141 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm4, %ymm2
142 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,2,4,0,0,2,4,0]
143 ; AVX2-FAST-NEXT: # ymm4 = mem[0,1,0,1]
144 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm4, %ymm0
145 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6],ymm2[7]
146 ; AVX2-FAST-NEXT: vextractf128 $1, %ymm3, %xmm2
147 ; AVX2-FAST-NEXT: vmovlps %xmm2, 48(%rax)
148 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
149 ; AVX2-FAST-NEXT: vmovaps %xmm1, 32(%rax)
150 ; AVX2-FAST-NEXT: vzeroupper
151 ; AVX2-FAST-NEXT: retq
153 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf2:
154 ; AVX2-FAST-PERLANE: # %bb.0:
155 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
156 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %r10
157 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
158 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
159 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
160 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
161 ; AVX2-FAST-PERLANE-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
162 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
163 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
164 ; AVX2-FAST-PERLANE-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
165 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
166 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
167 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
168 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
169 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
170 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,1,4,6,6,5]
171 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,1]
172 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm0[0,2,2,3,4,6,6,7]
173 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,2]
174 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6],ymm1[7]
175 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm3 = <3,5,7,u>
176 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm3, %ymm2
177 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [3,5,0,1,3,5,0,1]
178 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,1,0,1]
179 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm3, %ymm0
180 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[3]
181 ; AVX2-FAST-PERLANE-NEXT: vextractf128 $1, %ymm0, %xmm0
182 ; AVX2-FAST-PERLANE-NEXT: vmovlps %xmm0, 48(%rax)
183 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, 32(%rax)
184 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rax)
185 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
186 ; AVX2-FAST-PERLANE-NEXT: retq
188 ; AVX512-SLOW-LABEL: store_i32_stride7_vf2:
189 ; AVX512-SLOW: # %bb.0:
190 ; AVX512-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
191 ; AVX512-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %r10
192 ; AVX512-SLOW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
193 ; AVX512-SLOW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
194 ; AVX512-SLOW-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
195 ; AVX512-SLOW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
196 ; AVX512-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
197 ; AVX512-SLOW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
198 ; AVX512-SLOW-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
199 ; AVX512-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
200 ; AVX512-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
201 ; AVX512-SLOW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
202 ; AVX512-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
203 ; AVX512-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
204 ; AVX512-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,2,4,6,16,20,18,1,3,5,7,17,21,19,u,u>
205 ; AVX512-SLOW-NEXT: vpermi2d %zmm0, %zmm2, %zmm1
206 ; AVX512-SLOW-NEXT: vextracti32x4 $2, %zmm1, 32(%rax)
207 ; AVX512-SLOW-NEXT: vextracti32x4 $3, %zmm1, %xmm0
208 ; AVX512-SLOW-NEXT: vmovq %xmm0, 48(%rax)
209 ; AVX512-SLOW-NEXT: vmovdqa %ymm1, (%rax)
210 ; AVX512-SLOW-NEXT: vzeroupper
211 ; AVX512-SLOW-NEXT: retq
213 ; AVX512-FAST-LABEL: store_i32_stride7_vf2:
214 ; AVX512-FAST: # %bb.0:
215 ; AVX512-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
216 ; AVX512-FAST-NEXT: movq {{[0-9]+}}(%rsp), %r10
217 ; AVX512-FAST-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
218 ; AVX512-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
219 ; AVX512-FAST-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
220 ; AVX512-FAST-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
221 ; AVX512-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
222 ; AVX512-FAST-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
223 ; AVX512-FAST-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
224 ; AVX512-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
225 ; AVX512-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
226 ; AVX512-FAST-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
227 ; AVX512-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
228 ; AVX512-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
229 ; AVX512-FAST-NEXT: vpermi2q %ymm3, %ymm0, %ymm1
230 ; AVX512-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm0
231 ; AVX512-FAST-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,2,4,6,8,10,12,1,3,5,7,9,11,13,u,u>
232 ; AVX512-FAST-NEXT: vpermd %zmm0, %zmm1, %zmm0
233 ; AVX512-FAST-NEXT: vextracti32x4 $2, %zmm0, 32(%rax)
234 ; AVX512-FAST-NEXT: vextracti32x4 $3, %zmm0, %xmm1
235 ; AVX512-FAST-NEXT: vmovq %xmm1, 48(%rax)
236 ; AVX512-FAST-NEXT: vmovdqa %ymm0, (%rax)
237 ; AVX512-FAST-NEXT: vzeroupper
238 ; AVX512-FAST-NEXT: retq
239 %in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64
240 %in.vec1 = load <2 x i32>, ptr %in.vecptr1, align 64
241 %in.vec2 = load <2 x i32>, ptr %in.vecptr2, align 64
242 %in.vec3 = load <2 x i32>, ptr %in.vecptr3, align 64
243 %in.vec4 = load <2 x i32>, ptr %in.vecptr4, align 64
244 %in.vec5 = load <2 x i32>, ptr %in.vecptr5, align 64
245 %in.vec6 = load <2 x i32>, ptr %in.vecptr6, align 64
246 %1 = shufflevector <2 x i32> %in.vec0, <2 x i32> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
247 %2 = shufflevector <2 x i32> %in.vec2, <2 x i32> %in.vec3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
248 %3 = shufflevector <2 x i32> %in.vec4, <2 x i32> %in.vec5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
249 %4 = shufflevector <4 x i32> %1, <4 x i32> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
250 %5 = shufflevector <2 x i32> %in.vec6, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
251 %6 = shufflevector <4 x i32> %3, <4 x i32> %5, <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
252 %7 = shufflevector <6 x i32> %6, <6 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 undef, i32 undef>
253 %8 = shufflevector <8 x i32> %4, <8 x i32> %7, <14 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13>
254 %interleaved.vec = shufflevector <14 x i32> %8, <14 x i32> poison, <14 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13>
255 store <14 x i32> %interleaved.vec, ptr %out.vec, align 64
259 define void @store_i32_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
260 ; SSE-LABEL: store_i32_stride7_vf4:
262 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
263 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10
264 ; SSE-NEXT: movaps (%rdi), %xmm0
265 ; SSE-NEXT: movaps (%rsi), %xmm6
266 ; SSE-NEXT: movaps (%rdx), %xmm5
267 ; SSE-NEXT: movaps (%rcx), %xmm1
268 ; SSE-NEXT: movaps (%r8), %xmm4
269 ; SSE-NEXT: movaps (%r9), %xmm2
270 ; SSE-NEXT: movaps (%r10), %xmm8
271 ; SSE-NEXT: movaps %xmm5, %xmm7
272 ; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
273 ; SSE-NEXT: movaps %xmm0, %xmm3
274 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
275 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm7[0]
276 ; SSE-NEXT: movaps %xmm4, %xmm9
277 ; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm2[2],xmm9[3],xmm2[3]
278 ; SSE-NEXT: movaps %xmm5, %xmm7
279 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
280 ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm9[0]
281 ; SSE-NEXT: movaps %xmm8, %xmm9
282 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,3],xmm2[3,3]
283 ; SSE-NEXT: movaps %xmm4, %xmm10
284 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,1],xmm1[1,1]
285 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
286 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm9[2,0]
287 ; SSE-NEXT: movaps %xmm0, %xmm9
288 ; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm6[2],xmm9[3],xmm6[3]
289 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
290 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
291 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,3],xmm9[0,1]
292 ; SSE-NEXT: movaps %xmm6, %xmm9
293 ; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
294 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,3],xmm10[2,0]
295 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm8[2,3,2,3]
296 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[1,3]
297 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm8[0,2]
298 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm6[3,3]
299 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm5[2,0]
300 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm10[0],xmm0[1,2,3]
301 ; SSE-NEXT: movaps %xmm4, 16(%rax)
302 ; SSE-NEXT: movaps %xmm9, 32(%rax)
303 ; SSE-NEXT: movaps %xmm2, 48(%rax)
304 ; SSE-NEXT: movaps %xmm1, 96(%rax)
305 ; SSE-NEXT: movaps %xmm7, 64(%rax)
306 ; SSE-NEXT: movaps %xmm3, (%rax)
307 ; SSE-NEXT: movaps %xmm0, 80(%rax)
310 ; AVX1-ONLY-LABEL: store_i32_stride7_vf4:
311 ; AVX1-ONLY: # %bb.0:
312 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
313 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r10
314 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm5
315 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm6
316 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm3
317 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm4
318 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm1
319 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm2
320 ; AVX1-ONLY-NEXT: vmovaps (%r10), %xmm0
321 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm7
322 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm8
323 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm9
324 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
325 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0],ymm8[2,1],ymm10[6,4],ymm8[6,5]
326 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
327 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm7[1],ymm5[1],ymm7[3],ymm5[3]
328 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm5[1,1],ymm6[2,0],ymm5[5,5],ymm6[6,4]
329 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm3[1,1],xmm4[1,1]
330 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm11[1,2],ymm6[3,4,5,6,7]
331 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm10[3,4,5],ymm6[6,7]
332 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm5[3,3],ymm7[3,3],ymm5[7,7],ymm7[7,7]
333 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm11
334 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm11 = ymm3[2],ymm11[2],ymm3[3],ymm11[3],ymm3[6],ymm11[6],ymm3[7],ymm11[7]
335 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3,4],ymm10[5,6],ymm11[7]
336 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm2[1],xmm1[1]
337 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,1],ymm11[2,0],ymm9[6,5],ymm11[6,4]
338 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3,4],ymm10[5,6,7]
339 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm10
340 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[4],ymm8[4],ymm10[5],ymm8[5]
341 ; AVX1-ONLY-NEXT: vbroadcastss (%r10), %ymm10
342 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
343 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[4],ymm5[4],ymm7[5],ymm5[5]
344 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm4[0,0],xmm3[0,0]
345 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,2,0]
346 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3],ymm5[4,5,6,7]
347 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6],ymm3[7]
348 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3]
349 ; AVX1-ONLY-NEXT: vbroadcastss 12(%rcx), %xmm2
350 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
351 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
352 ; AVX1-ONLY-NEXT: vmovaps %xmm0, 96(%rax)
353 ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rax)
354 ; AVX1-ONLY-NEXT: vmovaps %ymm9, 64(%rax)
355 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 32(%rax)
356 ; AVX1-ONLY-NEXT: vzeroupper
357 ; AVX1-ONLY-NEXT: retq
359 ; AVX2-SLOW-LABEL: store_i32_stride7_vf4:
360 ; AVX2-SLOW: # %bb.0:
361 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
362 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %r10
363 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm4
364 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm5
365 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm0
366 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm2
367 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm3
368 ; AVX2-SLOW-NEXT: vmovaps (%r10), %xmm1
369 ; AVX2-SLOW-NEXT: vinsertf128 $1, (%rsi), %ymm4, %ymm4
370 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm5, %ymm6
371 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm7
372 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm8 = [2,6,0,3,2,6,0,3]
373 ; AVX2-SLOW-NEXT: # ymm8 = mem[0,1,0,1]
374 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm8, %ymm8
375 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm9 = ymm4[3,3,3,3,7,7,7,7]
376 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,2]
377 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6],ymm8[7]
378 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm9
379 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm9 = ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[6],ymm3[6],ymm9[7],ymm3[7]
380 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,1,0,1,4,5,4,5]
381 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3,4],ymm8[5,6,7]
382 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm9
383 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm7[1,1,1,1,5,5,5,5]
384 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
385 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm5[1],xmm0[1],zero
386 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm10 = [5,0,2,6,5,0,2,6]
387 ; AVX2-SLOW-NEXT: # ymm10 = mem[0,1,0,1]
388 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm10, %ymm10
389 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm10[0],ymm5[1,2],ymm10[3,4,5,6,7]
390 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm9[3,4,5],ymm5[6,7]
391 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm9 = [0,4,0,4]
392 ; AVX2-SLOW-NEXT: # xmm9 = mem[0,0]
393 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm9, %ymm6
394 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [0,4,0,1,0,4,0,1]
395 ; AVX2-SLOW-NEXT: # ymm9 = mem[0,1,0,1]
396 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm9, %ymm4
397 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3],ymm4[4,5,6,7]
398 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4]
399 ; AVX2-SLOW-NEXT: vpermps %ymm7, %ymm6, %ymm6
400 ; AVX2-SLOW-NEXT: vbroadcastss (%r10), %ymm7
401 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
402 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6],ymm4[7]
403 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
404 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[3],xmm2[1,2],zero
405 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
406 ; AVX2-SLOW-NEXT: vmovaps %xmm0, 96(%rax)
407 ; AVX2-SLOW-NEXT: vmovaps %ymm4, (%rax)
408 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 32(%rax)
409 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 64(%rax)
410 ; AVX2-SLOW-NEXT: vzeroupper
411 ; AVX2-SLOW-NEXT: retq
413 ; AVX2-FAST-LABEL: store_i32_stride7_vf4:
414 ; AVX2-FAST: # %bb.0:
415 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
416 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %r10
417 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm2
418 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm3
419 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm1
420 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm4
421 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm5
422 ; AVX2-FAST-NEXT: vmovaps (%r10), %xmm0
423 ; AVX2-FAST-NEXT: vinsertf128 $1, (%rsi), %ymm2, %ymm2
424 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm3
425 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm6
426 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm7
427 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm8 = ymm6[1,1,1,1,5,5,5,5]
428 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5],ymm8[6,7]
429 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm8 = [5,1,5,1,5,1,5,1]
430 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm8, %ymm8
431 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [5,0,2,6,5,0,2,6]
432 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,0,1]
433 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm9, %ymm9
434 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1,2],ymm9[3,4,5,6,7]
435 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5],ymm8[6,7]
436 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm8 = [7,3,7,3,7,3,7,3]
437 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm8, %ymm8
438 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [2,6,0,3,2,6,0,3]
439 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,0,1]
440 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm9, %ymm9
441 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6],ymm9[7]
442 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm4
443 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
444 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,0,1,4,5,4,5]
445 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2,3,4],ymm8[5,6,7]
446 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm5 = [0,4,0,4]
447 ; AVX2-FAST-NEXT: # xmm5 = mem[0,0]
448 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm5, %ymm3
449 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [0,4,0,1,0,4,0,1]
450 ; AVX2-FAST-NEXT: # ymm5 = mem[0,1,0,1]
451 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm5, %ymm2
452 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
453 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm3 = [0,4,0,4,0,4,0,4]
454 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm3, %ymm3
455 ; AVX2-FAST-NEXT: vbroadcastss (%r10), %ymm5
456 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
457 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6],ymm2[7]
458 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm3 = [7,3,7,3,7,3,7,3]
459 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm3, %ymm3
460 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
461 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3]
462 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rax)
463 ; AVX2-FAST-NEXT: vmovaps %ymm4, 64(%rax)
464 ; AVX2-FAST-NEXT: vmovaps %ymm7, 32(%rax)
465 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
466 ; AVX2-FAST-NEXT: vmovaps %xmm0, 96(%rax)
467 ; AVX2-FAST-NEXT: vzeroupper
468 ; AVX2-FAST-NEXT: retq
470 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf4:
471 ; AVX2-FAST-PERLANE: # %bb.0:
472 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
473 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %r10
474 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm4
475 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm5
476 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm0
477 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm2
478 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm3
479 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r10), %xmm1
480 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, (%rsi), %ymm4, %ymm4
481 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm5, %ymm6
482 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm7
483 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm8 = [2,6,0,3,2,6,0,3]
484 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0,1,0,1]
485 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm8, %ymm8
486 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm9 = ymm4[3,3,3,3,7,7,7,7]
487 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,2]
488 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6],ymm8[7]
489 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm9
490 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm9 = ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[6],ymm3[6],ymm9[7],ymm3[7]
491 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,1,0,1,4,5,4,5]
492 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3,4],ymm8[5,6,7]
493 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm9
494 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm7[1,1,1,1,5,5,5,5]
495 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
496 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm5[1],xmm0[1],zero
497 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm10 = [5,0,2,6,5,0,2,6]
498 ; AVX2-FAST-PERLANE-NEXT: # ymm10 = mem[0,1,0,1]
499 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm10, %ymm10
500 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm10[0],ymm5[1,2],ymm10[3,4,5,6,7]
501 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm9[3,4,5],ymm5[6,7]
502 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm9 = [0,4,0,4]
503 ; AVX2-FAST-PERLANE-NEXT: # xmm9 = mem[0,0]
504 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm9, %ymm6
505 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [0,4,0,1,0,4,0,1]
506 ; AVX2-FAST-PERLANE-NEXT: # ymm9 = mem[0,1,0,1]
507 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm9, %ymm4
508 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3],ymm4[4,5,6,7]
509 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4]
510 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm7, %ymm6, %ymm6
511 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss (%r10), %ymm7
512 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
513 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6],ymm4[7]
514 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
515 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[3],xmm2[1,2],zero
516 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
517 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, 96(%rax)
518 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, (%rax)
519 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 32(%rax)
520 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 64(%rax)
521 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
522 ; AVX2-FAST-PERLANE-NEXT: retq
524 ; AVX512-LABEL: store_i32_stride7_vf4:
526 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
527 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
528 ; AVX512-NEXT: vmovdqa (%rdi), %xmm0
529 ; AVX512-NEXT: vmovdqa (%rdx), %xmm1
530 ; AVX512-NEXT: vmovdqa (%r8), %xmm2
531 ; AVX512-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
532 ; AVX512-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
533 ; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
534 ; AVX512-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm1
535 ; AVX512-NEXT: vinserti32x4 $2, (%r10), %zmm1, %zmm1
536 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,4,8,12,16,20,24,1,5,9,13,17,21,25,2,6]
537 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
538 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <10,14,18,22,26,3,7,11,15,19,23,27,u,u,u,u>
539 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
540 ; AVX512-NEXT: vextracti32x4 $2, %zmm3, 96(%rax)
541 ; AVX512-NEXT: vmovdqa64 %zmm2, (%rax)
542 ; AVX512-NEXT: vmovdqa %ymm3, 64(%rax)
543 ; AVX512-NEXT: vzeroupper
545 %in.vec0 = load <4 x i32>, ptr %in.vecptr0, align 64
546 %in.vec1 = load <4 x i32>, ptr %in.vecptr1, align 64
547 %in.vec2 = load <4 x i32>, ptr %in.vecptr2, align 64
548 %in.vec3 = load <4 x i32>, ptr %in.vecptr3, align 64
549 %in.vec4 = load <4 x i32>, ptr %in.vecptr4, align 64
550 %in.vec5 = load <4 x i32>, ptr %in.vecptr5, align 64
551 %in.vec6 = load <4 x i32>, ptr %in.vecptr6, align 64
552 %1 = shufflevector <4 x i32> %in.vec0, <4 x i32> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
553 %2 = shufflevector <4 x i32> %in.vec2, <4 x i32> %in.vec3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
554 %3 = shufflevector <4 x i32> %in.vec4, <4 x i32> %in.vec5, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
555 %4 = shufflevector <8 x i32> %1, <8 x i32> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
556 %5 = shufflevector <4 x i32> %in.vec6, <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
557 %6 = shufflevector <8 x i32> %3, <8 x i32> %5, <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
558 %7 = shufflevector <12 x i32> %6, <12 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 undef, i32 undef, i32 undef, i32 undef>
559 %8 = shufflevector <16 x i32> %4, <16 x i32> %7, <28 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
560 %interleaved.vec = shufflevector <28 x i32> %8, <28 x i32> poison, <28 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27>
561 store <28 x i32> %interleaved.vec, ptr %out.vec, align 64
565 define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
566 ; SSE-LABEL: store_i32_stride7_vf8:
568 ; SSE-NEXT: subq $24, %rsp
569 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
570 ; SSE-NEXT: movdqa (%rdi), %xmm3
571 ; SSE-NEXT: movdqa 16(%rdi), %xmm9
572 ; SSE-NEXT: movdqa (%rsi), %xmm1
573 ; SSE-NEXT: movdqa 16(%rsi), %xmm5
574 ; SSE-NEXT: movdqa 16(%rdx), %xmm6
575 ; SSE-NEXT: movdqa 16(%rcx), %xmm12
576 ; SSE-NEXT: movdqa 16(%r8), %xmm11
577 ; SSE-NEXT: movdqa (%r9), %xmm8
578 ; SSE-NEXT: movaps 16(%r9), %xmm0
579 ; SSE-NEXT: movdqa (%rax), %xmm10
580 ; SSE-NEXT: movaps 16(%rax), %xmm7
581 ; SSE-NEXT: movaps %xmm7, %xmm2
582 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm0[3,3]
583 ; SSE-NEXT: movaps %xmm0, %xmm13
584 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[3,3,3,3]
585 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[3,3,3,3]
586 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
587 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
588 ; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
589 ; SSE-NEXT: movdqa %xmm11, %xmm2
590 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm12[1,1]
591 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,1,1]
592 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
593 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
594 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
595 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
596 ; SSE-NEXT: movdqa %xmm9, %xmm2
597 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
598 ; SSE-NEXT: movdqa %xmm5, %xmm14
599 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
600 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,1,1]
601 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
602 ; SSE-NEXT: movdqa %xmm13, %xmm15
603 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
604 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
605 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
606 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[1,1,1,1]
607 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
608 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
609 ; SSE-NEXT: movdqa %xmm3, %xmm2
610 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
611 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
612 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
613 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
614 ; SSE-NEXT: movaps (%rdx), %xmm2
615 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
616 ; SSE-NEXT: movaps %xmm2, %xmm13
617 ; SSE-NEXT: movss {{.*#+}} xmm13 = xmm5[0],xmm13[1,2,3]
618 ; SSE-NEXT: movaps (%rcx), %xmm0
619 ; SSE-NEXT: movaps (%r8), %xmm4
620 ; SSE-NEXT: movaps %xmm4, %xmm5
621 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[1,1]
622 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm5[2,0]
623 ; SSE-NEXT: movdqa %xmm6, %xmm5
624 ; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm12[2],xmm5[3],xmm12[3]
625 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm12 = xmm12[0],xmm6[0]
626 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
627 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm14[3,3]
628 ; SSE-NEXT: movdqa %xmm9, %xmm14
629 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,1],xmm7[0,3]
630 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,1],xmm9[3,3]
631 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm6[2,0]
632 ; SSE-NEXT: movdqa %xmm11, %xmm6
633 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm15[0],xmm6[1],xmm15[1]
634 ; SSE-NEXT: movdqa %xmm15, %xmm12
635 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm14[2,0]
636 ; SSE-NEXT: movaps %xmm4, %xmm15
637 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
638 ; SSE-NEXT: unpckhps {{.*#+}} xmm15 = xmm15[2],xmm8[2],xmm15[3],xmm8[3]
639 ; SSE-NEXT: movaps %xmm2, %xmm14
640 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
641 ; SSE-NEXT: unpckhps {{.*#+}} xmm14 = xmm14[2],xmm0[2],xmm14[3],xmm0[3]
642 ; SSE-NEXT: movlhps {{.*#+}} xmm14 = xmm14[0],xmm15[0]
643 ; SSE-NEXT: movdqa %xmm10, %xmm15
644 ; SSE-NEXT: movdqa %xmm3, %xmm1
645 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm3[1,3]
646 ; SSE-NEXT: movaps %xmm4, %xmm3
647 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
648 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm15[0,2]
649 ; SSE-NEXT: movaps %xmm2, %xmm15
650 ; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
651 ; SSE-NEXT: movdqa %xmm1, %xmm8
652 ; SSE-NEXT: movdqa %xmm1, %xmm0
653 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
654 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
655 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm15[0]
656 ; SSE-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
657 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm11[0]
658 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
659 ; SSE-NEXT: # xmm9 = xmm9[0],mem[0],xmm9[1],mem[1]
660 ; SSE-NEXT: shufps $36, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
661 ; SSE-NEXT: # xmm9 = xmm9[0,1],mem[2,0]
662 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
663 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[2,0]
664 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[2,3,2,3]
665 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
666 ; SSE-NEXT: movaps %xmm0, %xmm1
667 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
668 ; SSE-NEXT: # xmm10 = xmm10[3,3],mem[3,3]
669 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm10[2,0]
670 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
671 ; SSE-NEXT: # xmm2 = mem[3,3,3,3]
672 ; SSE-NEXT: movss {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3]
673 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
674 ; SSE-NEXT: movaps %xmm9, 112(%rax)
675 ; SSE-NEXT: movdqa %xmm5, 176(%rax)
676 ; SSE-NEXT: movdqa %xmm8, (%rax)
677 ; SSE-NEXT: movaps %xmm3, 16(%rax)
678 ; SSE-NEXT: movaps %xmm14, 64(%rax)
679 ; SSE-NEXT: movaps %xmm6, 128(%rax)
680 ; SSE-NEXT: movaps %xmm7, 192(%rax)
681 ; SSE-NEXT: movaps %xmm13, 32(%rax)
682 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
683 ; SSE-NEXT: movaps %xmm0, 48(%rax)
684 ; SSE-NEXT: movaps %xmm4, 96(%rax)
685 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
686 ; SSE-NEXT: movaps %xmm0, 160(%rax)
687 ; SSE-NEXT: movaps %xmm1, 80(%rax)
688 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
689 ; SSE-NEXT: movaps %xmm0, 144(%rax)
690 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
691 ; SSE-NEXT: movaps %xmm0, 208(%rax)
692 ; SSE-NEXT: addq $24, %rsp
695 ; AVX1-ONLY-LABEL: store_i32_stride7_vf8:
696 ; AVX1-ONLY: # %bb.0:
697 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
698 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm0
699 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm1
700 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm2
701 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm3
702 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm7
703 ; AVX1-ONLY-NEXT: vmovaps (%r9), %ymm8
704 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm2[1,1],ymm3[1,1],ymm2[5,5],ymm3[5,5]
705 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm1[1,1],ymm0[1,1],ymm1[5,5],ymm0[5,5]
706 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
707 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6],ymm5[7]
708 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
709 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0],ymm7[2,1],ymm5[6,4],ymm7[6,5]
710 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm6 = mem[2,3],ymm5[2,3]
711 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm5, %xmm5
712 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[3]
713 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3,4,5,6],ymm5[7]
714 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
715 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm5
716 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm6
717 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm6[1,1],xmm5[1,1]
718 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
719 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm11
720 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm12
721 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm12[1],xmm11[1]
722 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm11[1,1],xmm9[0,2]
723 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm9, %ymm9
724 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm15
725 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm14
726 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm14[1],xmm15[1],zero
727 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1,2],ymm9[3,4,5,6,7]
728 ; AVX1-ONLY-NEXT: vmovaps (%rax), %xmm10
729 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm13
730 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm13[5],ymm4[6,7]
731 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1,2],ymm4[3,4,5],ymm9[6,7]
732 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
733 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm4 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
734 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
735 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm13[0],ymm4[2],ymm13[2]
736 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm13 = xmm11[0],xmm12[0]
737 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm13[2,0],xmm12[2,1]
738 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm13, %ymm13
739 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm9 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
740 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm9[0,1,0,1]
741 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm13[0,1],ymm9[2,3],ymm13[4,5,6,7]
742 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm4[4,5,6],ymm9[7]
743 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
744 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
745 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,2,0,4,5,6,4]
746 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm9 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
747 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm9[2,3]
748 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm9 = ymm7[1],ymm8[1],ymm7[3],ymm8[3]
749 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm8[1,1],ymm9[0,2],ymm8[5,5],ymm9[4,6]
750 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
751 ; AVX1-ONLY-NEXT: vmovaps 16(%rax), %xmm13
752 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm13[1],ymm9[2,3,4,5,6,7]
753 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1],ymm4[2,3,4,5],ymm9[6,7]
754 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm3[3,3],ymm2[3,3],ymm3[7,7],ymm2[7,7]
755 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
756 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm0[3,3],ymm1[3,3],ymm0[7,7],ymm1[7,7]
757 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm13, %xmm13
758 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm13[0,1,2],ymm9[3,4,5,6,7]
759 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[3,3],ymm8[3,3],ymm7[7,7],ymm8[7,7]
760 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3,4],ymm7[5,6],ymm9[7]
761 ; AVX1-ONLY-NEXT: vbroadcastsd 24(%rax), %ymm8
762 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1,2,3,4,5,6],ymm8[7]
763 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm8 = xmm12[3,3],xmm11[3,3]
764 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
765 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm9 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
766 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm9, %ymm9
767 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6],ymm9[7]
768 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm6[2,2,2,2]
769 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm5[0,1,2],xmm9[3]
770 ; AVX1-ONLY-NEXT: vbroadcastsd 8(%rax), %ymm11
771 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
772 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3,4],ymm8[5,6,7]
773 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
774 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
775 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm3[3,1],ymm1[0,2],ymm3[7,5],ymm1[4,6]
776 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
777 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[3,3],xmm6[3,3]
778 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm10[3]
779 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
780 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
781 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax)
782 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 64(%rax)
783 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rax)
784 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
785 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
786 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 192(%rax)
787 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
788 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
789 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
790 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
791 ; AVX1-ONLY-NEXT: vzeroupper
792 ; AVX1-ONLY-NEXT: retq
794 ; AVX2-SLOW-LABEL: store_i32_stride7_vf8:
795 ; AVX2-SLOW: # %bb.0:
796 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
797 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
798 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm2
799 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm9
800 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %ymm3
801 ; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm6
802 ; AVX2-SLOW-NEXT: vmovaps (%r9), %ymm7
803 ; AVX2-SLOW-NEXT: vmovaps (%rax), %xmm1
804 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm8
805 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm4
806 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm5
807 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm10 = xmm5[1,1,1,1]
808 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
809 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm10, %ymm10
810 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3,4],ymm8[5],ymm10[6,7]
811 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm12
812 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm13
813 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm13[1],xmm12[1],zero
814 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm14
815 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %xmm15
816 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm11 = xmm15[1,1,2,2]
817 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm14[2],xmm11[3]
818 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
819 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1,2],ymm11[3,4,5,6,7]
820 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5],ymm10[6,7]
821 ; AVX2-SLOW-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
822 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm10 = ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[6],ymm3[6],ymm9[7],ymm3[7]
823 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm11 = ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[6],ymm2[6],ymm0[7],ymm2[7]
824 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,2]
825 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
826 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm7[1,1,2,2,5,5,6,6]
827 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm6[2],ymm11[3,4,5],ymm6[6],ymm11[7]
828 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
829 ; AVX2-SLOW-NEXT: vmovaps 16(%rax), %xmm8
830 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0],ymm8[1],ymm11[2,3,4,5,6,7]
831 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm10[2,3,4,5],ymm8[6,7]
832 ; AVX2-SLOW-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
833 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm8 = xmm14[3,3],xmm15[3,3]
834 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
835 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm11 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
836 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm11 = xmm11[0,1,2,2]
837 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
838 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1,2,3,4],ymm8[5,6],ymm11[7]
839 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm11 = xmm5[2,2,2,2]
840 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm11 = xmm4[0,1,2],xmm11[3]
841 ; AVX2-SLOW-NEXT: vbroadcastsd 8(%rax), %ymm10
842 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
843 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm8[0,1],ymm10[2,3,4],ymm8[5,6,7]
844 ; AVX2-SLOW-NEXT: vbroadcastss %xmm12, %xmm8
845 ; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm10
846 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm8 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
847 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm10 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
848 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,1,2,2]
849 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,2,1]
850 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3],ymm10[4,5,6,7]
851 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm10 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
852 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
853 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm12
854 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm10[0],ymm12[0],ymm10[2],ymm12[2]
855 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm8[0,1,2,3],ymm10[4,5,6],ymm8[7]
856 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm9[1,1],ymm3[1,1],ymm9[5,5],ymm3[5,5]
857 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm2[1,1,1,1,5,5,5,5]
858 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm0[1],ymm10[2,3,4],ymm0[5],ymm10[6,7]
859 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,2]
860 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3,4],ymm8[5,6],ymm10[7]
861 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm7[0,0,0,0,4,4,4,4]
862 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm13 = ymm6[0,1,0,1,4,5,4,5]
863 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0],ymm10[1],ymm13[2,3,4],ymm10[5],ymm13[6,7]
864 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,1,2,3]
865 ; AVX2-SLOW-NEXT: vbroadcastsd 16(%rax), %ymm13
866 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3],ymm10[4,5,6,7]
867 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6],ymm10[7]
868 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm9 = ymm3[2],ymm9[2],ymm3[3],ymm9[3],ymm3[6],ymm9[6],ymm3[7],ymm9[7]
869 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[3,3,3,3]
870 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm10 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
871 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[3,3,3,3]
872 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7]
873 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,3],ymm7[3,3],ymm6[7,7],ymm7[7,7]
874 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3,4],ymm6[5,6],ymm9[7]
875 ; AVX2-SLOW-NEXT: vbroadcastsd 24(%rax), %ymm7
876 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1,2,3,4,5,6],ymm7[7]
877 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[4],ymm2[4],ymm0[5],ymm2[5]
878 ; AVX2-SLOW-NEXT: vbroadcastss 16(%rdx), %ymm2
879 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm3[3,1,2,0,7,5,6,4]
880 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6],ymm3[7]
881 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
882 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm4[3,3],xmm5[3,3]
883 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
884 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
885 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
886 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 96(%rax)
887 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 192(%rax)
888 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
889 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rax)
890 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 128(%rax)
891 ; AVX2-SLOW-NEXT: vmovaps %ymm12, (%rax)
892 ; AVX2-SLOW-NEXT: vmovaps %ymm11, 64(%rax)
893 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
894 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
895 ; AVX2-SLOW-NEXT: vzeroupper
896 ; AVX2-SLOW-NEXT: retq
898 ; AVX2-FAST-LABEL: store_i32_stride7_vf8:
899 ; AVX2-FAST: # %bb.0:
900 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
901 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm1
902 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm2
903 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm10
904 ; AVX2-FAST-NEXT: vmovaps (%rcx), %ymm3
905 ; AVX2-FAST-NEXT: vmovaps (%r8), %ymm7
906 ; AVX2-FAST-NEXT: vmovaps (%r9), %ymm8
907 ; AVX2-FAST-NEXT: vmovaps (%rax), %xmm0
908 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
909 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm6
910 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm4
911 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm5
912 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm9 = xmm5[1,1,1,1]
913 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0],xmm4[1],xmm9[2,3]
914 ; AVX2-FAST-NEXT: vbroadcastsd %xmm9, %ymm9
915 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3,4],ymm6[5],ymm9[6,7]
916 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm11
917 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm12
918 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm9 = zero,xmm12[1],xmm11[1],zero
919 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm13
920 ; AVX2-FAST-NEXT: vmovaps (%rsi), %xmm14
921 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm15 = xmm14[1,1,2,2]
922 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0,1],xmm13[2],xmm15[3]
923 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
924 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm15[0],ymm9[1,2],ymm15[3,4,5,6,7]
925 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2],ymm6[3,4,5],ymm9[6,7]
926 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
927 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm6 = ymm10[2],ymm3[2],ymm10[3],ymm3[3],ymm10[6],ymm3[6],ymm10[7],ymm3[7]
928 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm9 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
929 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,2,2,2]
930 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
931 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm8[1,1,2,2,5,5,6,6]
932 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm7[2],ymm9[3,4,5],ymm7[6],ymm9[7]
933 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,2,3]
934 ; AVX2-FAST-NEXT: vmovaps 16(%rax), %xmm15
935 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm15[1],ymm9[2,3,4,5,6,7]
936 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1],ymm6[2,3,4,5],ymm9[6,7]
937 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
938 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm2[1,1,1,1,5,5,5,5]
939 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm1[1],ymm6[2,3,4],ymm1[5],ymm6[6,7]
940 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,2,2,2]
941 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm10[1,1],ymm3[1,1],ymm10[5,5],ymm3[5,5]
942 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm15[5,6],ymm6[7]
943 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm8[0,0,0,0,4,4,4,4]
944 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm7[0,1,0,1,4,5,4,5]
945 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm15[1],ymm9[2,3,4],ymm15[5],ymm9[6,7]
946 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,2,3]
947 ; AVX2-FAST-NEXT: vbroadcastsd 16(%rax), %ymm15
948 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm15[2,3],ymm9[4,5,6,7]
949 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2],ymm6[3,4,5,6],ymm9[7]
950 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
951 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm6 = xmm13[3,3],xmm14[3,3]
952 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
953 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm9 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
954 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,1,2,2,0,1,2,2]
955 ; AVX2-FAST-NEXT: # ymm0 = mem[0,1,0,1]
956 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm0, %ymm9
957 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3,4],ymm6[5,6],ymm9[7]
958 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm9 = xmm5[2,2,2,2]
959 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm9 = xmm4[0,1,2],xmm9[3]
960 ; AVX2-FAST-NEXT: vbroadcastsd 8(%rax), %ymm15
961 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm15[4,5,6,7]
962 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm9[2,3,4],ymm6[5,6,7]
963 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm9 = ymm3[2],ymm10[2],ymm3[3],ymm10[3],ymm3[6],ymm10[6],ymm3[7],ymm10[7]
964 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[3,3,3,3]
965 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm10 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
966 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[3,3,3,3]
967 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7]
968 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm7 = ymm7[3,3],ymm8[3,3],ymm7[7,7],ymm8[7,7]
969 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3,4],ymm7[5,6],ymm9[7]
970 ; AVX2-FAST-NEXT: vbroadcastsd 24(%rax), %ymm8
971 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1,2,3,4,5,6],ymm8[7]
972 ; AVX2-FAST-NEXT: vbroadcastss %xmm11, %xmm8
973 ; AVX2-FAST-NEXT: vbroadcastss %xmm12, %xmm9
974 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
975 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm9 = xmm13[0],xmm14[0],xmm13[1],xmm14[1]
976 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm0, %ymm0
977 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3],ymm0[4,5,6,7]
978 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm8 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
979 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
980 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
981 ; AVX2-FAST-NEXT: vbroadcastsd %xmm10, %ymm9
982 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm8[0],ymm9[0],ymm8[2],ymm9[2]
983 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6],ymm0[7]
984 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
985 ; AVX2-FAST-NEXT: vbroadcastss 16(%rdx), %ymm2
986 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm3[3,1,2,0,7,5,6,4]
987 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6],ymm3[7]
988 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
989 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm4[3,3],xmm5[3,3]
990 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm10[3]
991 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4,5,6,7]
992 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
993 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%rax)
994 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
995 ; AVX2-FAST-NEXT: vmovaps %ymm7, 192(%rax)
996 ; AVX2-FAST-NEXT: vmovaps %ymm6, 64(%rax)
997 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
998 ; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rax)
999 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1000 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rax)
1001 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1002 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
1003 ; AVX2-FAST-NEXT: vzeroupper
1004 ; AVX2-FAST-NEXT: retq
1005 ;
1006 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf8:
1007 ; AVX2-FAST-PERLANE: # %bb.0:
1008 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1009 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
1010 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm2
1011 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm9
1012 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %ymm3
1013 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm6
1014 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %ymm7
1015 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rax), %xmm1
1016 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm8
1017 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm4
1018 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm5
1019 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm10 = xmm5[1,1,1,1]
1020 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
1021 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm10, %ymm10
1022 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3,4],ymm8[5],ymm10[6,7]
1023 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm12
1024 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm13
1025 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm13[1],xmm12[1],zero
1026 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm14
1027 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %xmm15
1028 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm11 = xmm15[1,1,2,2]
1029 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm14[2],xmm11[3]
1030 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
1031 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1,2],ymm11[3,4,5,6,7]
1032 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5],ymm10[6,7]
1033 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1034 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm10 = ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[6],ymm3[6],ymm9[7],ymm3[7]
1035 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm11 = ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[6],ymm2[6],ymm0[7],ymm2[7]
1036 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,2]
1037 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
1038 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm7[1,1,2,2,5,5,6,6]
1039 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm6[2],ymm11[3,4,5],ymm6[6],ymm11[7]
1040 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
1041 ; AVX2-FAST-PERLANE-NEXT: vmovaps 16(%rax), %xmm8
1042 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0],ymm8[1],ymm11[2,3,4,5,6,7]
1043 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm10[2,3,4,5],ymm8[6,7]
1044 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1045 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm8 = xmm14[3,3],xmm15[3,3]
1046 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
1047 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm11 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
1048 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm11 = xmm11[0,1,2,2]
1049 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
1050 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1,2,3,4],ymm8[5,6],ymm11[7]
1051 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm11 = xmm5[2,2,2,2]
1052 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm11 = xmm4[0,1,2],xmm11[3]
1053 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 8(%rax), %ymm10
1054 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
1055 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm8[0,1],ymm10[2,3,4],ymm8[5,6,7]
1056 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm12, %xmm8
1057 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm10
1058 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm8 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
1059 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm10 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
1060 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,1,2,2]
1061 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,2,1]
1062 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3],ymm10[4,5,6,7]
1063 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm10 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
1064 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
1065 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm12
1066 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm10[0],ymm12[0],ymm10[2],ymm12[2]
1067 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm8[0,1,2,3],ymm10[4,5,6],ymm8[7]
1068 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm9[1,1],ymm3[1,1],ymm9[5,5],ymm3[5,5]
1069 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm2[1,1,1,1,5,5,5,5]
1070 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm0[1],ymm10[2,3,4],ymm0[5],ymm10[6,7]
1071 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,2]
1072 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3,4],ymm8[5,6],ymm10[7]
1073 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm7[0,0,0,0,4,4,4,4]
1074 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm13 = ymm6[0,1,0,1,4,5,4,5]
1075 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0],ymm10[1],ymm13[2,3,4],ymm10[5],ymm13[6,7]
1076 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,1,2,3]
1077 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 16(%rax), %ymm13
1078 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3],ymm10[4,5,6,7]
1079 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6],ymm10[7]
1080 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm9 = ymm3[2],ymm9[2],ymm3[3],ymm9[3],ymm3[6],ymm9[6],ymm3[7],ymm9[7]
1081 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[3,3,3,3]
1082 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm10 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
1083 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[3,3,3,3]
1084 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7]
1085 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,3],ymm7[3,3],ymm6[7,7],ymm7[7,7]
1086 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3,4],ymm6[5,6],ymm9[7]
1087 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rax), %ymm7
1088 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1,2,3,4,5,6],ymm7[7]
1089 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[4],ymm2[4],ymm0[5],ymm2[5]
1090 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%rdx), %ymm2
1091 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm3[3,1,2,0,7,5,6,4]
1092 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6],ymm3[7]
1093 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
1094 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm4[3,3],xmm5[3,3]
1095 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
1096 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
1097 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1098 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 96(%rax)
1099 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 192(%rax)
1100 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1101 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rax)
1102 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 128(%rax)
1103 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm12, (%rax)
1104 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, 64(%rax)
1105 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1106 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
1107 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
1108 ; AVX2-FAST-PERLANE-NEXT: retq
1109 ;
1110 ; AVX512F-LABEL: store_i32_stride7_vf8:
1111 ; AVX512F: # %bb.0:
1112 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
1113 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %r10
1114 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
1115 ; AVX512F-NEXT: vmovdqa (%rdx), %ymm1
1116 ; AVX512F-NEXT: vmovdqa (%r8), %ymm2
1117 ; AVX512F-NEXT: vmovdqa (%r10), %ymm3
1118 ; AVX512F-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm4
1119 ; AVX512F-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm5
1120 ; AVX512F-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2
1121 ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [31,7,15,23,31,7,15,23]
1122 ; AVX512F-NEXT: # ymm0 = mem[0,1,0,1]
1123 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm4, %zmm0
1124 ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,23,31,7,6,23,31,7]
1125 ; AVX512F-NEXT: # ymm1 = mem[0,1,0,1]
1126 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
1127 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,u,0,8,16,u,u,u,u,1,9,17,u,u>
1128 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1129 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,8,16,24,u,u,u,1,9,17,25,u,u,u,2,10>
1130 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1131 ; AVX512F-NEXT: movw $14448, %cx # imm = 0x3870
1132 ; AVX512F-NEXT: kmovw %ecx, %k1
1133 ; AVX512F-NEXT: vmovdqa32 %zmm6, %zmm7 {%k1}
1134 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,2,10,18,u,u,u,u,3,11,19,u,u,u,u>
1135 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1136 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <2,10,u,u,u,19,27,3,11,u,u,u,20,28,4,12>
1137 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm5, %zmm8
1138 ; AVX512F-NEXT: movw $3612, %cx # imm = 0xE1C
1139 ; AVX512F-NEXT: kmovw %ecx, %k1
1140 ; AVX512F-NEXT: vmovdqa32 %zmm6, %zmm8 {%k1}
1141 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,5,13,21,29,u,u,u,6,14,22,30,u,u>
1142 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
1143 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = <4,12,20,u,u,u,u,5,13,21,u,u,u,u,6,14>
1144 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1145 ; AVX512F-NEXT: movw $15480, %cx # imm = 0x3C78
1146 ; AVX512F-NEXT: kmovw %ecx, %k1
1147 ; AVX512F-NEXT: vmovdqa32 %zmm6, %zmm4 {%k1}
1148 ; AVX512F-NEXT: vmovdqa64 %zmm4, 128(%rax)
1149 ; AVX512F-NEXT: vmovdqa64 %zmm8, 64(%rax)
1150 ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rax)
1151 ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
1152 ; AVX512F-NEXT: vmovdqa %ymm0, 192(%rax)
1153 ; AVX512F-NEXT: vzeroupper
1154 ; AVX512F-NEXT: retq
1155 ;
1156 ; AVX512BW-LABEL: store_i32_stride7_vf8:
1157 ; AVX512BW: # %bb.0:
1158 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1159 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
1160 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
1161 ; AVX512BW-NEXT: vmovdqa (%rdx), %ymm1
1162 ; AVX512BW-NEXT: vmovdqa (%r8), %ymm2
1163 ; AVX512BW-NEXT: vmovdqa (%r10), %ymm3
1164 ; AVX512BW-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm4
1165 ; AVX512BW-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm5
1166 ; AVX512BW-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2
1167 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [31,7,15,23,31,7,15,23]
1168 ; AVX512BW-NEXT: # ymm0 = mem[0,1,0,1]
1169 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm0
1170 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,23,31,7,6,23,31,7]
1171 ; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1]
1172 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
1173 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,u,0,8,16,u,u,u,u,1,9,17,u,u>
1174 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1175 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,8,16,24,u,u,u,1,9,17,25,u,u,u,2,10>
1176 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1177 ; AVX512BW-NEXT: movw $14448, %cx # imm = 0x3870
1178 ; AVX512BW-NEXT: kmovd %ecx, %k1
1179 ; AVX512BW-NEXT: vmovdqa32 %zmm6, %zmm7 {%k1}
1180 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,2,10,18,u,u,u,u,3,11,19,u,u,u,u>
1181 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1182 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <2,10,u,u,u,19,27,3,11,u,u,u,20,28,4,12>
1183 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm8
1184 ; AVX512BW-NEXT: movw $3612, %cx # imm = 0xE1C
1185 ; AVX512BW-NEXT: kmovd %ecx, %k1
1186 ; AVX512BW-NEXT: vmovdqa32 %zmm6, %zmm8 {%k1}
1187 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,5,13,21,29,u,u,u,6,14,22,30,u,u>
1188 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
1189 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = <4,12,20,u,u,u,u,5,13,21,u,u,u,u,6,14>
1190 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1191 ; AVX512BW-NEXT: movw $15480, %cx # imm = 0x3C78
1192 ; AVX512BW-NEXT: kmovd %ecx, %k1
1193 ; AVX512BW-NEXT: vmovdqa32 %zmm6, %zmm4 {%k1}
1194 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 128(%rax)
1195 ; AVX512BW-NEXT: vmovdqa64 %zmm8, 64(%rax)
1196 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rax)
1197 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
1198 ; AVX512BW-NEXT: vmovdqa %ymm0, 192(%rax)
1199 ; AVX512BW-NEXT: vzeroupper
1200 ; AVX512BW-NEXT: retq
1201 %in.vec0 = load <8 x i32>, ptr %in.vecptr0, align 64
1202 %in.vec1 = load <8 x i32>, ptr %in.vecptr1, align 64
1203 %in.vec2 = load <8 x i32>, ptr %in.vecptr2, align 64
1204 %in.vec3 = load <8 x i32>, ptr %in.vecptr3, align 64
1205 %in.vec4 = load <8 x i32>, ptr %in.vecptr4, align 64
1206 %in.vec5 = load <8 x i32>, ptr %in.vecptr5, align 64
1207 %in.vec6 = load <8 x i32>, ptr %in.vecptr6, align 64
1208 %1 = shufflevector <8 x i32> %in.vec0, <8 x i32> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1209 %2 = shufflevector <8 x i32> %in.vec2, <8 x i32> %in.vec3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1210 %3 = shufflevector <8 x i32> %in.vec4, <8 x i32> %in.vec5, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1211 %4 = shufflevector <16 x i32> %1, <16 x i32> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1212 %5 = shufflevector <8 x i32> %in.vec6, <8 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1213 %6 = shufflevector <16 x i32> %3, <16 x i32> %5, <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
1214 %7 = shufflevector <24 x i32> %6, <24 x i32> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1215 %8 = shufflevector <32 x i32> %4, <32 x i32> %7, <56 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55>
1216 %interleaved.vec = shufflevector <56 x i32> %8, <56 x i32> poison, <56 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 50, i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 51, i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 52, i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55>
1217 store <56 x i32> %interleaved.vec, ptr %out.vec, align 64
1218 ret void
1219 }
1221 define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
1222 ; SSE-LABEL: store_i32_stride7_vf16:
1223 ; SSE: # %bb.0:
1224 ; SSE-NEXT: subq $520, %rsp # imm = 0x208
1225 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1226 ; SSE-NEXT: movdqa (%rdi), %xmm10
1227 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1228 ; SSE-NEXT: movdqa (%rsi), %xmm4
1229 ; SSE-NEXT: movdqa 16(%rsi), %xmm6
1230 ; SSE-NEXT: movaps (%rdx), %xmm5
1231 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1232 ; SSE-NEXT: movdqa 16(%rdx), %xmm9
1233 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1234 ; SSE-NEXT: movaps (%rcx), %xmm8
1235 ; SSE-NEXT: movaps 16(%rcx), %xmm2
1236 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1237 ; SSE-NEXT: movaps (%r8), %xmm15
1238 ; SSE-NEXT: movaps 16(%r8), %xmm14
1239 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1240 ; SSE-NEXT: movdqa (%r9), %xmm13
1241 ; SSE-NEXT: movdqa 16(%r9), %xmm3
1242 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1243 ; SSE-NEXT: movdqa (%rax), %xmm11
1244 ; SSE-NEXT: movaps %xmm15, %xmm0
1245 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm8[1,1]
1246 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
1247 ; SSE-NEXT: movss {{.*#+}} xmm5 = xmm1[0],xmm5[1,2,3]
1248 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
1249 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1250 ; SSE-NEXT: movdqa %xmm10, %xmm0
1251 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
1252 ; SSE-NEXT: movdqa %xmm4, %xmm12
1253 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1254 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[1,1,1,1]
1255 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1256 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[1,1,1,1]
1257 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
1258 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
1259 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1260 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
1261 ; SSE-NEXT: movdqa %xmm6, %xmm9
1262 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,1,1]
1263 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1264 ; SSE-NEXT: movaps %xmm14, %xmm0
1265 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
1266 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
1267 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1268 ; SSE-NEXT: movdqa 16(%rax), %xmm10
1269 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
1270 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1271 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
1272 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1273 ; SSE-NEXT: movdqa 16(%rdi), %xmm4
1274 ; SSE-NEXT: movdqa %xmm4, %xmm0
1275 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
1276 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1277 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
1278 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1279 ; SSE-NEXT: movdqa 32(%rsi), %xmm1
1280 ; SSE-NEXT: movaps 32(%rdx), %xmm2
1281 ; SSE-NEXT: movaps %xmm2, (%rsp) # 16-byte Spill
1282 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
1283 ; SSE-NEXT: movdqa %xmm1, %xmm6
1284 ; SSE-NEXT: movaps %xmm2, %xmm1
1285 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1286 ; SSE-NEXT: movaps 32(%rcx), %xmm2
1287 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1288 ; SSE-NEXT: movaps 32(%r8), %xmm0
1289 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1290 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
1291 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
1292 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1293 ; SSE-NEXT: movdqa 32(%r9), %xmm1
1294 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1295 ; SSE-NEXT: movdqa 32(%rax), %xmm0
1296 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1297 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1298 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
1299 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1300 ; SSE-NEXT: movdqa 32(%rdi), %xmm0
1301 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1302 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
1303 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1304 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
1305 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1306 ; SSE-NEXT: movdqa 48(%rsi), %xmm1
1307 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1308 ; SSE-NEXT: movdqa 48(%rdx), %xmm0
1309 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1310 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1311 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1312 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
1313 ; SSE-NEXT: movaps 48(%rcx), %xmm3
1314 ; SSE-NEXT: movaps 48(%r8), %xmm0
1315 ; SSE-NEXT: movaps %xmm0, %xmm1
1316 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[1,1]
1317 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1318 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm1[2,0]
1319 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1320 ; SSE-NEXT: movaps 48(%rdi), %xmm2
1321 ; SSE-NEXT: movaps 48(%rax), %xmm1
1322 ; SSE-NEXT: movaps %xmm2, %xmm5
1323 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[0,3]
1324 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1325 ; SSE-NEXT: movaps 48(%r9), %xmm7
1326 ; SSE-NEXT: movaps %xmm7, %xmm5
1327 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1]
1328 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1329 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1330 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1331 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3]
1332 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1333 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm7[3,3]
1334 ; SSE-NEXT: movaps %xmm0, %xmm2
1335 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
1336 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1337 ; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm0[1]
1338 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
1339 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,3,3,3]
1340 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1341 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
1342 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1343 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
1344 ; SSE-NEXT: movaps %xmm14, %xmm0
1345 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1346 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
1347 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1348 ; SSE-NEXT: movdqa %xmm2, %xmm3
1349 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm12[0],xmm3[1],xmm12[1]
1350 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
1351 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1352 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm2[1,3]
1353 ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1354 ; SSE-NEXT: movaps %xmm15, %xmm5
1355 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1356 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1]
1357 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm11[0,2]
1358 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1359 ; SSE-NEXT: unpckhps {{.*#+}} xmm15 = xmm15[2],xmm13[2],xmm15[3],xmm13[3]
1360 ; SSE-NEXT: unpckhps {{.*#+}} xmm14 = xmm14[2],xmm8[2],xmm14[3],xmm8[3]
1361 ; SSE-NEXT: movlhps {{.*#+}} xmm14 = xmm14[0],xmm15[0]
1362 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1363 ; SSE-NEXT: movaps %xmm2, %xmm0
1364 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1365 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm5[0]
1366 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1367 ; SSE-NEXT: movdqa %xmm4, %xmm13
1368 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
1369 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm0[2,0]
1370 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm10[0,3]
1371 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
1372 ; SSE-NEXT: movaps %xmm12, %xmm11
1373 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1374 ; SSE-NEXT: unpcklps {{.*#+}} xmm11 = xmm11[0],xmm3[0],xmm11[1],xmm3[1]
1375 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm4[2,0]
1376 ; SSE-NEXT: movaps %xmm5, %xmm10
1377 ; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm2[2],xmm10[3],xmm2[3]
1378 ; SSE-NEXT: movaps %xmm12, %xmm0
1379 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
1380 ; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm0[0]
1381 ; SSE-NEXT: movdqa (%rsp), %xmm5 # 16-byte Reload
1382 ; SSE-NEXT: movdqa %xmm5, %xmm0
1383 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1384 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
1385 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
1386 ; SSE-NEXT: movdqa %xmm9, %xmm15
1387 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm6[0],xmm15[1],xmm6[1]
1388 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm15 = xmm15[0],xmm0[0]
1389 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1390 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm9[1,3]
1391 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
1392 ; SSE-NEXT: movaps %xmm8, %xmm6
1393 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1394 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
1395 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[0,2]
1396 ; SSE-NEXT: movaps %xmm8, %xmm0
1397 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
1398 ; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm2[2],xmm5[3],xmm2[3]
1399 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
1400 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1401 ; SSE-NEXT: movaps %xmm0, %xmm3
1402 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1403 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
1404 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
1405 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1406 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1407 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
1408 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm1[2,0]
1409 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1410 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1411 ; SSE-NEXT: shufps $36, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1412 ; SSE-NEXT: # xmm1 = xmm1[0,1],mem[2,0]
1413 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1414 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1415 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
1416 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1417 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm1[0,1]
1418 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm7[2,0]
1419 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm2[3,3]
1420 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1421 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,0]
1422 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1423 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1424 ; SSE-NEXT: # xmm7 = xmm7[3,3],mem[3,3]
1425 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1426 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm7[2,0]
1427 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1428 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
1429 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm7[0],xmm2[1,2,3]
1430 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1431 ; SSE-NEXT: movdqa %xmm0, %xmm7
1432 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1433 ; SSE-NEXT: # xmm7 = xmm7[3,3],mem[3,3]
1434 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1435 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm7[2,0]
1436 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1437 ; SSE-NEXT: # xmm7 = mem[3,3,3,3]
1438 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm7[0],xmm0[1,2,3]
1439 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1440 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1441 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1442 ; SSE-NEXT: # xmm7 = xmm7[3,3],mem[3,3]
1443 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1444 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm7[2,0]
1445 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1446 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
1447 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm7[0],xmm2[1,2,3]
1448 ; SSE-NEXT: movdqa %xmm0, %xmm7
1449 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1450 ; SSE-NEXT: # xmm7 = xmm7[3,3],mem[3,3]
1451 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,3],xmm7[2,0]
1452 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1453 ; SSE-NEXT: # xmm7 = mem[3,3,3,3]
1454 ; SSE-NEXT: movss {{.*#+}} xmm12 = xmm7[0],xmm12[1,2,3]
1455 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
1456 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1457 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
1458 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,3],xmm0[2,0]
1459 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1460 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
1461 ; SSE-NEXT: movss {{.*#+}} xmm9 = xmm7[0],xmm9[1,2,3]
1462 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1463 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
1464 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,3],xmm0[2,0]
1465 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1466 ; SSE-NEXT: # xmm7 = mem[3,3,3,3]
1467 ; SSE-NEXT: movss {{.*#+}} xmm8 = xmm7[0],xmm8[1,2,3]
1468 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1469 ; SSE-NEXT: movaps %xmm1, 416(%rax)
1470 ; SSE-NEXT: movaps %xmm3, 400(%rax)
1471 ; SSE-NEXT: movaps %xmm4, 384(%rax)
1472 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1473 ; SSE-NEXT: movaps %xmm0, 352(%rax)
1474 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1475 ; SSE-NEXT: movaps %xmm0, 336(%rax)
1476 ; SSE-NEXT: movdqa %xmm5, 288(%rax)
1477 ; SSE-NEXT: movaps %xmm6, 240(%rax)
1478 ; SSE-NEXT: movdqa %xmm15, 224(%rax)
1479 ; SSE-NEXT: movaps %xmm10, 176(%rax)
1480 ; SSE-NEXT: movaps %xmm11, 128(%rax)
1481 ; SSE-NEXT: movaps %xmm13, 112(%rax)
1482 ; SSE-NEXT: movaps %xmm14, 64(%rax)
1483 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1484 ; SSE-NEXT: movaps %xmm0, 16(%rax)
1485 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1486 ; SSE-NEXT: movaps %xmm0, (%rax)
1487 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1488 ; SSE-NEXT: movaps %xmm0, 432(%rax)
1489 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1490 ; SSE-NEXT: movaps %xmm0, 368(%rax)
1491 ; SSE-NEXT: movaps %xmm8, 320(%rax)
1492 ; SSE-NEXT: movaps %xmm9, 304(%rax)
1493 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1494 ; SSE-NEXT: movaps %xmm0, 272(%rax)
1495 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1496 ; SSE-NEXT: movaps %xmm0, 256(%rax)
1497 ; SSE-NEXT: movaps %xmm12, 208(%rax)
1498 ; SSE-NEXT: movaps %xmm2, 192(%rax)
1499 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1500 ; SSE-NEXT: movaps %xmm0, 160(%rax)
1501 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1502 ; SSE-NEXT: movaps %xmm0, 144(%rax)
1503 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1504 ; SSE-NEXT: movaps %xmm0, 96(%rax)
1505 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1506 ; SSE-NEXT: movaps %xmm0, 80(%rax)
1507 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1508 ; SSE-NEXT: movaps %xmm0, 48(%rax)
1509 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1510 ; SSE-NEXT: movaps %xmm0, 32(%rax)
1511 ; SSE-NEXT: addq $520, %rsp # imm = 0x208
1512 ; SSE-NEXT: retq
1513 ;
1514 ; AVX1-ONLY-LABEL: store_i32_stride7_vf16:
1515 ; AVX1-ONLY: # %bb.0:
1516 ; AVX1-ONLY-NEXT: subq $456, %rsp # imm = 0x1C8
1517 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
1518 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm5
1519 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %ymm6
1520 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %ymm4
1521 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %ymm7
1522 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %ymm0
1523 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %ymm1
1524 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %ymm2
1525 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm3 = ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[6],ymm7[6],ymm4[7],ymm7[7]
1526 ; AVX1-ONLY-NEXT: vmovaps %ymm7, %ymm8
1527 ; AVX1-ONLY-NEXT: vmovaps %ymm4, %ymm7
1528 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm6[1],ymm5[1],ymm6[3],ymm5[3]
1529 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
1530 ; AVX1-ONLY-NEXT: vmovaps %ymm5, %ymm9
1531 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1532 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm4[2,3],ymm3[2,3]
1533 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm0[6,7]
1534 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm1[2,3,2,3]
1535 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[1,1,2,2,5,5,6,6]
1536 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4,5,6],ymm4[7]
1537 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
1538 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7]
1539 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1540 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %xmm10
1541 ; AVX1-ONLY-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1542 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %xmm11
1543 ; AVX1-ONLY-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1544 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm4
1545 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm12
1546 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm3 = xmm12[0],xmm4[0]
1547 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,1]
1548 ; AVX1-ONLY-NEXT: vmovaps %xmm4, %xmm13
1549 ; AVX1-ONLY-NEXT: vmovaps %xmm4, (%rsp) # 16-byte Spill
1550 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
1551 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %xmm5
1552 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %xmm14
1553 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm4 = xmm14[0],xmm5[0],xmm14[1],xmm5[1]
1554 ; AVX1-ONLY-NEXT: vmovaps %xmm14, %xmm15
1555 ; AVX1-ONLY-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1556 ; AVX1-ONLY-NEXT: vmovaps %xmm5, %xmm14
1557 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1558 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,0,1]
1559 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
1560 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm4 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
1561 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
1562 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %xmm5
1563 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1564 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
1565 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
1566 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6],ymm3[7]
1567 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1568 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm10[1,1],xmm11[1,1]
1569 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
1570 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5],ymm3[6,7]
1571 ; AVX1-ONLY-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1572 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm13[1],xmm12[1]
1573 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm12[1,1],xmm4[0,2]
1574 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
1575 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm15[1],xmm14[1],zero
1576 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1,2],ymm4[3,4,5,6,7]
1577 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5],ymm4[6,7]
1578 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1579 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm6[1,1],ymm9[1,1],ymm6[5,5],ymm9[5,5]
1580 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1581 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
1582 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm7[1,1],ymm8[1,1],ymm7[5,5],ymm8[5,5]
1583 ; AVX1-ONLY-NEXT: vmovaps %ymm8, %ymm13
1584 ; AVX1-ONLY-NEXT: vmovaps %ymm7, %ymm11
1585 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1586 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6],ymm3[7]
1587 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
1588 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,1],ymm1[6,4],ymm0[6,5]
1589 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
1590 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm0
1591 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
1592 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6],ymm0[7]
1593 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1594 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm5
1595 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm14
1596 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm5[1],xmm14[1]
1597 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm14[1,1],xmm0[0,2]
1598 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1599 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm2
1600 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm12
1601 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm12[1],xmm2[1],zero
1602 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm7
1603 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1604 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7]
1605 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm3
1606 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1607 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm4
1608 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1609 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm3[1,1],xmm4[1,1]
1610 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
1611 ; AVX1-ONLY-NEXT: vmovaps (%rax), %xmm2
1612 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1613 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
1614 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
1615 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
1616 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1617 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
1618 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1619 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
1620 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1621 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm14[0],xmm5[0]
1622 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm5[2,1]
1623 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
1624 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm12[0],xmm7[0],xmm12[1],xmm7[1]
1625 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,0,1]
1626 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
1627 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6],ymm1[7]
1628 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1629 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm10
1630 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm9
1631 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm9[1,1],ymm10[1,1],ymm9[5,5],ymm10[5,5]
1632 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
1633 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm8
1634 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm7
1635 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm8[1,1],ymm7[1,1],ymm8[5,5],ymm7[5,5]
1636 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm2[5,6],ymm0[7]
1637 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm15
1638 ; AVX1-ONLY-NEXT: vmovaps (%r9), %ymm1
1639 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm1[0],ymm15[0],ymm1[2],ymm15[2]
1640 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm15[2,1],ymm2[6,4],ymm15[6,5]
1641 ; AVX1-ONLY-NEXT: vmovaps (%rax), %ymm5
1642 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm5[2,3],ymm0[2,3]
1643 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm0
1644 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[3]
1645 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6],ymm0[7]
1646 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1647 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm9[1],ymm10[1],ymm9[3],ymm10[3]
1648 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
1649 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm2 = ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[6],ymm7[6],ymm8[7],ymm7[7]
1650 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
1651 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm15[1],ymm1[1],ymm15[3],ymm1[3]
1652 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,1],ymm2[0,2],ymm1[5,5],ymm2[4,6]
1653 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
1654 ; AVX1-ONLY-NEXT: vmovaps 16(%rax), %xmm3
1655 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6,7]
1656 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5],ymm2[6,7]
1657 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1658 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm13[3,3],ymm11[3,3],ymm13[7,7],ymm11[7,7]
1659 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
1660 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
1661 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm11[3,3],ymm6[3,3],ymm11[7,7],ymm6[7,7]
1662 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm2
1663 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
1664 ; AVX1-ONLY-NEXT: vbroadcastss 60(%r8), %ymm2
1665 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
1666 ; AVX1-ONLY-NEXT: vbroadcastss 60(%r9), %ymm2
1667 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
1668 ; AVX1-ONLY-NEXT: vbroadcastsd 56(%rax), %ymm2
1669 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6],ymm2[7]
1670 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1671 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm7[3,3],ymm8[3,3],ymm7[7,7],ymm8[7,7]
1672 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
1673 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm10[3,3],ymm9[3,3],ymm10[7,7],ymm9[7,7]
1674 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm2
1675 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
1676 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm15[3,3],ymm1[3,3],ymm15[7,7],ymm1[7,7]
1677 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[2,3],ymm1[1,2],ymm5[6,7],ymm1[5,6]
1678 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
1679 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
1680 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
1681 ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
1682 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
1683 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
1684 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1685 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
1686 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
1687 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
1688 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1689 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
1690 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1691 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm6[2,2,2,2]
1692 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1693 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
1694 ; AVX1-ONLY-NEXT: vbroadcastsd 40(%rax), %ymm2
1695 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
1696 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
1697 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1698 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3],xmm14[3,3]
1699 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm2 # 16-byte Folded Reload
1700 ; AVX1-ONLY-NEXT: # xmm2 = xmm12[2],mem[2],xmm12[3],mem[3]
1701 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm2, %ymm2
1702 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1703 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
1704 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
1705 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm14[2,2,2,2]
1706 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
1707 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm12[0,1,2],xmm2[3]
1708 ; AVX1-ONLY-NEXT: vbroadcastsd 8(%rax), %ymm3
1709 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
1710 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4],ymm1[5,6,7]
1711 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm2 # 32-byte Folded Reload
1712 ; AVX1-ONLY-NEXT: # ymm2 = ymm11[0],mem[0],ymm11[1],mem[1],ymm11[4],mem[4],ymm11[5],mem[5]
1713 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1714 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm13[0],ymm3[2],ymm13[2]
1715 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm13[3,1],ymm3[0,2],ymm13[7,5],ymm3[4,6]
1716 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
1717 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm4[3,3],xmm6[3,3]
1718 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
1719 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
1720 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3],ymm2[4,5,6,7]
1721 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm3 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[4],ymm9[4],ymm10[5],ymm9[5]
1722 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
1723 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm7[3,1],ymm5[0,2],ymm7[7,5],ymm5[4,6]
1724 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
1725 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm12[3,3],xmm14[3,3]
1726 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
1727 ; AVX1-ONLY-NEXT: # xmm4 = xmm4[0,1,2],mem[3]
1728 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3],ymm3[4,5,6,7]
1729 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
1730 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%rax)
1731 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 320(%rax)
1732 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rax)
1733 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rax)
1734 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1735 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rax)
1736 ; AVX1-ONLY-NEXT: vmovaps %ymm15, 192(%rax)
1737 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1738 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
1739 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1740 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
1741 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1742 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
1743 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1744 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rax)
1745 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1746 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rax)
1747 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1748 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
1749 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1750 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rax)
1751 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1752 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rax)
1753 ; AVX1-ONLY-NEXT: addq $456, %rsp # imm = 0x1C8
1754 ; AVX1-ONLY-NEXT: vzeroupper
1755 ; AVX1-ONLY-NEXT: retq
1756 ;
1757 ; AVX2-SLOW-LABEL: store_i32_stride7_vf16:
1758 ; AVX2-SLOW: # %bb.0:
1759 ; AVX2-SLOW-NEXT: subq $488, %rsp # imm = 0x1E8
1760 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1761 ; AVX2-SLOW-NEXT: vmovaps (%rax), %xmm6
1762 ; AVX2-SLOW-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1763 ; AVX2-SLOW-NEXT: vmovaps 32(%rax), %xmm0
1764 ; AVX2-SLOW-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
1765 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1766 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm8
1767 ; AVX2-SLOW-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1768 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %xmm2
1769 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1770 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm9
1771 ; AVX2-SLOW-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1772 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %xmm1
1773 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1774 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
1775 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
1776 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
1777 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
1778 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm3
1779 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1780 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %xmm12
1781 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %xmm10
1782 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm10[1],xmm12[1],zero
1783 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm11
1784 ; AVX2-SLOW-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1785 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %xmm7
1786 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %xmm1
1787 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1788 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %xmm5
1789 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm5[1,1,2,2]
1790 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3]
1791 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
1792 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3,4,5,6,7]
1793 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5],ymm2[6,7]
1794 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1795 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
1796 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm9[1,1,1,1]
1797 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3]
1798 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm2, %ymm2
1799 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5],ymm2[6,7]
1800 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm1[1,1,2,2]
1801 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm11[2],xmm2[3]
1802 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
1803 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm13
1804 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm4 = zero,xmm13[1],xmm3[1],zero
1805 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1,2],ymm2[3,4,5,6,7]
1806 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5],ymm2[6,7]
1807 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1808 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
1809 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1810 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm1
1811 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1812 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
1813 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
1814 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm1
1815 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1816 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %ymm14
1817 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm2 = ymm1[2],ymm14[2],ymm1[3],ymm14[3],ymm1[6],ymm14[6],ymm1[7],ymm14[7]
1818 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
1819 ; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm1
1820 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1821 ; AVX2-SLOW-NEXT: vmovaps (%r9), %ymm2
1822 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1823 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,1,2,2,5,5,6,6]
1824 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
1825 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
1826 ; AVX2-SLOW-NEXT: vmovaps 16(%rax), %xmm4
1827 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3,4,5,6,7]
1828 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5],ymm2[6,7]
1829 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1830 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm11
1831 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm6
1832 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm11[2],ymm6[2],ymm11[3],ymm6[3],ymm11[6],ymm6[6],ymm11[7],ymm6[7]
1833 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[2,2,2,2]
1834 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm4
1835 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %ymm8
1836 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm4[2],ymm8[2],ymm4[3],ymm8[3],ymm4[6],ymm8[6],ymm4[7],ymm8[7]
1837 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm0[4,5,6,7]
1838 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %ymm1
1839 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1840 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0,1,2,3,4,5],ymm1[6,7]
1841 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %ymm9
1842 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm9[1,2,2,3,5,6,6,7]
1843 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,2]
1844 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0],ymm1[1,2,3,4,5,6],ymm15[7]
1845 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
1846 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
1847 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1848 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm8[2],ymm4[2],ymm8[3],ymm4[3],ymm8[6],ymm4[6],ymm8[7],ymm4[7]
1849 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
1850 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm11[2],ymm6[3],ymm11[3],ymm6[6],ymm11[6],ymm6[7],ymm11[7]
1851 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
1852 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
1853 ; AVX2-SLOW-NEXT: vbroadcastss 60(%r8), %ymm1
1854 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
1855 ; AVX2-SLOW-NEXT: vbroadcastss 60(%r9), %ymm1
1856 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1857 ; AVX2-SLOW-NEXT: vbroadcastsd 56(%rax), %ymm1
1858 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
1859 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1860 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm7[3,3],xmm5[3,3]
1861 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1862 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm1 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
1863 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
1864 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
1865 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
1866 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1867 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,2,2,2]
1868 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1869 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
1870 ; AVX2-SLOW-NEXT: vbroadcastsd 40(%rax), %ymm15
1871 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
1872 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4],ymm3[5,6,7]
1873 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1874 ; AVX2-SLOW-NEXT: vbroadcastss %xmm12, %xmm3
1875 ; AVX2-SLOW-NEXT: vbroadcastss %xmm10, %xmm1
1876 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
1877 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm1 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
1878 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
1879 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
1880 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
1881 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
1882 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1883 ; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm15 # 16-byte Reload
1884 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm15, %ymm3
1885 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
1886 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm1[4,5,6],ymm5[7]
1887 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1888 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1889 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
1890 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm7[3,3],xmm12[3,3]
1891 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1892 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1893 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm1 = xmm13[2],xmm2[2],xmm13[3],xmm2[3]
1894 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
1895 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
1896 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
1897 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1898 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm5[2,2,2,2]
1899 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
1900 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1,2],xmm1[3]
1901 ; AVX2-SLOW-NEXT: vbroadcastsd 8(%rax), %ymm3
1902 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
1903 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
1904 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1905 ; AVX2-SLOW-NEXT: vbroadcastss %xmm2, %xmm0
1906 ; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm3
1907 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
1908 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm3 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
1909 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,2,2]
1910 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,2,1]
1911 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
1912 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm3 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
1913 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
1914 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
1915 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm12, %ymm5
1916 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
1917 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6],ymm0[7]
1918 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1919 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm4[1,1],ymm8[1,1],ymm4[5,5],ymm8[5,5]
1920 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm6[1,1,1,1,5,5,5,5]
1921 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm11[1],ymm2[2,3,4],ymm11[5],ymm2[6,7]
1922 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
1923 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
1924 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm9[0,0,0,0,4,4,4,4]
1925 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
1926 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,1,0,1,4,5,4,5]
1927 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3,4],ymm2[5],ymm5[6,7]
1928 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
1929 ; AVX2-SLOW-NEXT: vbroadcastsd 48(%rax), %ymm5
1930 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3],ymm2[4,5,6,7]
1931 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0,1,2],ymm0[3,4,5,6],ymm2[7]
1932 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1933 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm4[1,1,1,1,5,5,5,5]
1934 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
1935 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm9[1],ymm2[2,3,4],ymm9[5],ymm2[6,7]
1936 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
1937 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1938 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm0[1,1],ymm14[1,1],ymm0[5,5],ymm14[5,5]
1939 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6],ymm2[7]
1940 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1941 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm3[0,0,0,0,4,4,4,4]
1942 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1943 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm1[0,1,0,1,4,5,4,5]
1944 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3,4],ymm5[5],ymm7[6,7]
1945 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,2,3]
1946 ; AVX2-SLOW-NEXT: vbroadcastsd 16(%rax), %ymm7
1947 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm7[2,3],ymm5[4,5,6,7]
1948 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm2[3,4,5,6],ymm5[7]
1949 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm2 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
1950 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[3,3,3,3]
1951 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm7 = ymm4[2],ymm9[2],ymm4[3],ymm9[3],ymm4[6],ymm9[6],ymm4[7],ymm9[7]
1952 ; AVX2-SLOW-NEXT: vmovaps %ymm9, %ymm0
1953 ; AVX2-SLOW-NEXT: vmovaps %ymm4, %ymm13
1954 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
1955 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1,2],ymm2[3,4,5,6,7]
1956 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm1[3,3],ymm3[3,3],ymm1[7,7],ymm3[7,7]
1957 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm9 = mem[2,3,2,3,6,7,6,7]
1958 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0],ymm7[1,2],ymm9[3,4],ymm7[5,6],ymm9[7]
1959 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3]
1960 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0],ymm2[1,2,3,4],ymm7[5,6,7]
1961 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm6 = ymm11[0],ymm6[0],ymm11[1],ymm6[1],ymm11[4],ymm6[4],ymm11[5],ymm6[5]
1962 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm8[3,1,2,0,7,5,6,4]
1963 ; AVX2-SLOW-NEXT: vbroadcastss 48(%rdx), %ymm7
1964 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm7[6],ymm4[7]
1965 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5],ymm4[6,7]
1966 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1967 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
1968 ; AVX2-SLOW-NEXT: # xmm6 = xmm1[3,3],mem[3,3]
1969 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm15[3]
1970 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm6[1,2,3],ymm4[4,5,6,7]
1971 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm6 = ymm0[0],ymm13[0],ymm0[1],ymm13[1],ymm0[4],ymm13[4],ymm0[5],ymm13[5]
1972 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm14[3,1,2,0,7,5,6,4]
1973 ; AVX2-SLOW-NEXT: vbroadcastss 16(%rdx), %ymm8
1974 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6],ymm7[7]
1975 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
1976 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1977 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
1978 ; AVX2-SLOW-NEXT: # xmm7 = xmm0[3,3],mem[3,3]
1979 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1,2],xmm12[3]
1980 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3],ymm6[4,5,6,7]
1981 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1982 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 96(%rax)
1983 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 320(%rax)
1984 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 192(%rax)
1985 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 128(%rax)
1986 ; AVX2-SLOW-NEXT: vmovaps %ymm10, 352(%rax)
1987 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1988 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rax)
1989 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1990 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
1991 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1992 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
1993 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1994 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rax)
1995 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1996 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rax)
1997 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1998 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax)
1999 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2000 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 288(%rax)
2001 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2002 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 416(%rax)
2003 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2004 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 384(%rax)
2005 ; AVX2-SLOW-NEXT: addq $488, %rsp # imm = 0x1E8
2006 ; AVX2-SLOW-NEXT: vzeroupper
2007 ; AVX2-SLOW-NEXT: retq
2008 ;
2009 ; AVX2-FAST-LABEL: store_i32_stride7_vf16:
2010 ; AVX2-FAST: # %bb.0:
2011 ; AVX2-FAST-NEXT: subq $536, %rsp # imm = 0x218
2012 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
2013 ; AVX2-FAST-NEXT: vmovaps (%rax), %xmm3
2014 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2015 ; AVX2-FAST-NEXT: vmovaps 32(%rax), %xmm0
2016 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2017 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2018 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm5
2019 ; AVX2-FAST-NEXT: vmovaps %xmm5, (%rsp) # 16-byte Spill
2020 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %xmm2
2021 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2022 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm7
2023 ; AVX2-FAST-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2024 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %xmm4
2025 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1,1,1]
2026 ; AVX2-FAST-NEXT: vmovaps %xmm4, %xmm14
2027 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
2028 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
2029 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
2030 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm8
2031 ; AVX2-FAST-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2032 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %xmm11
2033 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %xmm10
2034 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm10[1],xmm11[1],zero
2035 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm12
2036 ; AVX2-FAST-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2037 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %xmm9
2038 ; AVX2-FAST-NEXT: vmovaps (%rsi), %xmm4
2039 ; AVX2-FAST-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2040 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %xmm6
2041 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm6[1,1,2,2]
2042 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm9[2],xmm2[3]
2043 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
2044 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
2045 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
2046 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2047 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
2048 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm7[1,1,1,1]
2049 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3]
2050 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
2051 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
2052 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1,2,2]
2053 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm12[2],xmm1[3]
2054 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
2055 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm2
2056 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2057 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm8[1],zero
2058 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
2059 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
2060 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2061 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
2062 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2063 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm1
2064 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2065 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
2066 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
2067 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm1
2068 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2069 ; AVX2-FAST-NEXT: vmovaps (%rcx), %ymm2
2070 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2071 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
2072 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2073 ; AVX2-FAST-NEXT: vmovaps (%r8), %ymm2
2074 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2075 ; AVX2-FAST-NEXT: vmovaps (%r9), %ymm1
2076 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2077 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
2078 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
2079 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
2080 ; AVX2-FAST-NEXT: vmovaps 16(%rax), %xmm2
2081 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
2082 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
2083 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2084 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm13
2085 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %ymm12
2086 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[6],ymm12[6],ymm13[7],ymm12[7]
2087 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm0[2,2,2,2]
2088 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm3
2089 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %ymm8
2090 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm3[2],ymm8[2],ymm3[3],ymm8[3],ymm3[6],ymm8[6],ymm3[7],ymm8[7]
2091 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
2092 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %ymm7
2093 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm7[6,7]
2094 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %ymm4
2095 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm15 = [5,6,5,6,5,6,5,6]
2096 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm15, %ymm15
2097 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0],ymm1[1,2,3,4,5,6],ymm15[7]
2098 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
2099 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
2100 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2101 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm8[2],ymm3[2],ymm8[3],ymm3[3],ymm8[6],ymm3[6],ymm8[7],ymm3[7]
2102 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
2103 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[6],ymm13[6],ymm12[7],ymm13[7]
2104 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
2105 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2106 ; AVX2-FAST-NEXT: vbroadcastss 60(%r8), %ymm1
2107 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2108 ; AVX2-FAST-NEXT: vbroadcastss 60(%r9), %ymm1
2109 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2110 ; AVX2-FAST-NEXT: vbroadcastsd 56(%rax), %ymm1
2111 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
2112 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2113 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm9[3,3],xmm6[3,3]
2114 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
2115 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm15 = xmm10[2],xmm11[2],xmm10[3],xmm11[3]
2116 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [0,1,2,2,0,1,2,2]
2117 ; AVX2-FAST-NEXT: # ymm5 = mem[0,1,0,1]
2118 ; AVX2-FAST-NEXT: vpermps %ymm15, %ymm5, %ymm15
2119 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1,2,3,4],ymm1[5,6],ymm15[7]
2120 ; AVX2-FAST-NEXT: vmovaps %xmm14, %xmm1
2121 ; AVX2-FAST-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2122 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm15 = xmm14[2,2,2,2]
2123 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2124 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm15 = xmm0[0,1,2],xmm15[3]
2125 ; AVX2-FAST-NEXT: vbroadcastsd 40(%rax), %ymm14
2126 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
2127 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm14[2,3,4],ymm2[5,6,7]
2128 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2129 ; AVX2-FAST-NEXT: vbroadcastss %xmm11, %xmm2
2130 ; AVX2-FAST-NEXT: vbroadcastss %xmm10, %xmm10
2131 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm10[0],xmm2[0],xmm10[1],xmm2[1]
2132 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm6 = xmm9[0],xmm6[0],xmm9[1],xmm6[1]
2133 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm5, %ymm6
2134 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5,6,7]
2135 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2136 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
2137 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2138 ; AVX2-FAST-NEXT: vbroadcastsd %xmm14, %ymm9
2139 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[2],ymm9[2]
2140 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm6[4,5,6],ymm2[7]
2141 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2142 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm3[1,1],ymm8[1,1],ymm3[5,5],ymm8[5,5]
2143 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm12[1,1,1,1,5,5,5,5]
2144 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm13[1],ymm3[2,3,4],ymm13[5],ymm3[6,7]
2145 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,2]
2146 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6],ymm3[7]
2147 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm4[0,0,0,0,4,4,4,4]
2148 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm7[0,1,0,1,4,5,4,5]
2149 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
2150 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
2151 ; AVX2-FAST-NEXT: vbroadcastsd 48(%rax), %ymm3
2152 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
2153 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
2154 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2155 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2156 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2157 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm6[3,3]
2158 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
2159 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2160 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2161 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm3 = xmm7[2],xmm0[2],xmm7[3],xmm0[3]
2162 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm5, %ymm3
2163 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6],ymm3[7]
2164 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2165 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm9[2,2,2,2]
2166 ; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm15 # 16-byte Reload
2167 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm3 = xmm15[0,1,2],xmm3[3]
2168 ; AVX2-FAST-NEXT: vbroadcastsd 8(%rax), %ymm4
2169 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
2170 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4],ymm2[5,6,7]
2171 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2172 ; AVX2-FAST-NEXT: vbroadcastss %xmm0, %xmm3
2173 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm4
2174 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2175 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm4 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
2176 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm5, %ymm0
2177 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
2178 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm3 = xmm15[0],xmm9[0],xmm15[1],xmm9[1]
2179 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
2180 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2181 ; AVX2-FAST-NEXT: vbroadcastsd %xmm6, %ymm4
2182 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
2183 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6],ymm0[7]
2184 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2185 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2186 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm5[1,1,1,1,5,5,5,5]
2187 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2188 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm0[1],ymm3[2,3,4],ymm0[5],ymm3[6,7]
2189 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,2]
2190 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2191 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
2192 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm7[1,1],ymm1[1,1],ymm7[5,5],ymm1[5,5]
2193 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6],ymm3[7]
2194 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2195 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm10[0,0,0,0,4,4,4,4]
2196 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2197 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm2[0,1,0,1,4,5,4,5]
2198 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0],ymm4[1],ymm9[2,3,4],ymm4[5],ymm9[6,7]
2199 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,2,3]
2200 ; AVX2-FAST-NEXT: vbroadcastsd 16(%rax), %ymm9
2201 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm9[2,3],ymm4[4,5,6,7]
2202 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm4[0,1,2],ymm3[3,4,5,6],ymm4[7]
2203 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm4 = ymm1[2],ymm7[2],ymm1[3],ymm7[3],ymm1[6],ymm7[6],ymm1[7],ymm7[7]
2204 ; AVX2-FAST-NEXT: vmovaps %ymm1, %ymm3
2205 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[3,3,3,3]
2206 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm9 = ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[6],ymm0[6],ymm5[7],ymm0[7]
2207 ; AVX2-FAST-NEXT: vmovaps %ymm0, %ymm1
2208 ; AVX2-FAST-NEXT: vmovaps %ymm5, %ymm0
2209 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[3,3,3,3]
2210 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1,2],ymm4[3,4,5,6,7]
2211 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm2[3,3],ymm10[3,3],ymm2[7,7],ymm10[7,7]
2212 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm10 = mem[2,3,2,3,6,7,6,7]
2213 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1,2],ymm10[3,4],ymm9[5,6],ymm10[7]
2214 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,2,3]
2215 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0],ymm4[1,2,3,4],ymm9[5,6,7]
2216 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm7 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[4],ymm12[4],ymm13[5],ymm12[5]
2217 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm8[3,1,2,0,7,5,6,4]
2218 ; AVX2-FAST-NEXT: vbroadcastss 48(%rdx), %ymm8
2219 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm8[6],ymm5[7]
2220 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5],ymm5[6,7]
2221 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2222 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm7 # 16-byte Folded Reload
2223 ; AVX2-FAST-NEXT: # xmm7 = xmm2[3,3],mem[3,3]
2224 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1,2],xmm14[3]
2225 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm7[1,2,3],ymm5[4,5,6,7]
2226 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
2227 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm8 = ymm3[3,1,2,0,7,5,6,4]
2228 ; AVX2-FAST-NEXT: vbroadcastss 16(%rdx), %ymm9
2229 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6],ymm8[7]
2230 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7]
2231 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm8 # 16-byte Folded Reload
2232 ; AVX2-FAST-NEXT: # xmm8 = xmm15[3,3],mem[3,3]
2233 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1,2],xmm6[3]
2234 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3],ymm7[4,5,6,7]
2235 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
2236 ; AVX2-FAST-NEXT: vmovaps %ymm7, 96(%rax)
2237 ; AVX2-FAST-NEXT: vmovaps %ymm5, 320(%rax)
2238 ; AVX2-FAST-NEXT: vmovaps %ymm4, 192(%rax)
2239 ; AVX2-FAST-NEXT: vmovaps %ymm11, 128(%rax)
2240 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2241 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
2242 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2243 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
2244 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2245 ; AVX2-FAST-NEXT: vmovaps %ymm0, 352(%rax)
2246 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2247 ; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rax)
2248 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2249 ; AVX2-FAST-NEXT: vmovaps %ymm0, 288(%rax)
2250 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2251 ; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rax)
2252 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2253 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
2254 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2255 ; AVX2-FAST-NEXT: vmovaps %ymm0, 256(%rax)
2256 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2257 ; AVX2-FAST-NEXT: vmovaps %ymm0, 416(%rax)
2258 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2259 ; AVX2-FAST-NEXT: vmovaps %ymm0, 384(%rax)
2260 ; AVX2-FAST-NEXT: addq $536, %rsp # imm = 0x218
2261 ; AVX2-FAST-NEXT: vzeroupper
2262 ; AVX2-FAST-NEXT: retq
2263 ;
2264 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf16:
2265 ; AVX2-FAST-PERLANE: # %bb.0:
2266 ; AVX2-FAST-PERLANE-NEXT: subq $488, %rsp # imm = 0x1E8
2267 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2268 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rax), %xmm6
2269 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2270 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rax), %xmm0
2271 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
2272 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2273 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm8
2274 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2275 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %xmm2
2276 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2277 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm9
2278 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2279 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %xmm1
2280 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2281 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
2282 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
2283 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
2284 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
2285 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm3
2286 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2287 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %xmm12
2288 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %xmm10
2289 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm10[1],xmm12[1],zero
2290 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm11
2291 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2292 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %xmm7
2293 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %xmm1
2294 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2295 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %xmm5
2296 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm5[1,1,2,2]
2297 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3]
2298 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
2299 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3,4,5,6,7]
2300 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5],ymm2[6,7]
2301 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2302 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
2303 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm9[1,1,1,1]
2304 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3]
2305 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm2, %ymm2
2306 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5],ymm2[6,7]
2307 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm1[1,1,2,2]
2308 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm11[2],xmm2[3]
2309 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
2310 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm13
2311 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm4 = zero,xmm13[1],xmm3[1],zero
2312 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1,2],ymm2[3,4,5,6,7]
2313 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5],ymm2[6,7]
2314 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2315 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
2316 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2317 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm1
2318 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2319 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
2320 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
2321 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm1
2322 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2323 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %ymm14
2324 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm2 = ymm1[2],ymm14[2],ymm1[3],ymm14[3],ymm1[6],ymm14[6],ymm1[7],ymm14[7]
2325 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2326 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm1
2327 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2328 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %ymm2
2329 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2330 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,1,2,2,5,5,6,6]
2331 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
2332 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
2333 ; AVX2-FAST-PERLANE-NEXT: vmovaps 16(%rax), %xmm4
2334 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3,4,5,6,7]
2335 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5],ymm2[6,7]
2336 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2337 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm11
2338 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm6
2339 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm11[2],ymm6[2],ymm11[3],ymm6[3],ymm11[6],ymm6[6],ymm11[7],ymm6[7]
2340 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[2,2,2,2]
2341 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm4
2342 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %ymm8
2343 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm4[2],ymm8[2],ymm4[3],ymm8[3],ymm4[6],ymm8[6],ymm4[7],ymm8[7]
2344 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm0[4,5,6,7]
2345 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %ymm1
2346 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2347 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0,1,2,3,4,5],ymm1[6,7]
2348 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %ymm9
2349 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm9[1,2,2,3,5,6,6,7]
2350 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,2]
2351 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0],ymm1[1,2,3,4,5,6],ymm15[7]
2352 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
2353 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
2354 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2355 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm8[2],ymm4[2],ymm8[3],ymm4[3],ymm8[6],ymm4[6],ymm8[7],ymm4[7]
2356 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
2357 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm11[2],ymm6[3],ymm11[3],ymm6[6],ymm11[6],ymm6[7],ymm11[7]
2358 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
2359 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2360 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 60(%r8), %ymm1
2361 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2362 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 60(%r9), %ymm1
2363 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2364 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 56(%rax), %ymm1
2365 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
2366 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2367 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm7[3,3],xmm5[3,3]
2368 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2369 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm1 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
2370 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
2371 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
2372 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
2373 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2374 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,2,2,2]
2375 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2376 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
2377 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 40(%rax), %ymm15
2378 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
2379 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4],ymm3[5,6,7]
2380 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2381 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm12, %xmm3
2382 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm10, %xmm1
2383 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
2384 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm1 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
2385 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
2386 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
2387 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
2388 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2389 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2390 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsp), %xmm15 # 16-byte Reload
2391 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm15, %ymm3
2392 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
2393 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm1[4,5,6],ymm5[7]
2394 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2395 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2396 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2397 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm7[3,3],xmm12[3,3]
2398 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2399 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2400 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm1 = xmm13[2],xmm2[2],xmm13[3],xmm2[3]
2401 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
2402 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
2403 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
2404 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2405 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm5[2,2,2,2]
2406 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2407 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1,2],xmm1[3]
2408 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 8(%rax), %ymm3
2409 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
2410 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
2411 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2412 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm2, %xmm0
2413 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm3
2414 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2415 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm3 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
2416 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,2,2]
2417 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,2,1]
2418 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
2419 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm3 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
2420 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
2421 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2422 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm12, %ymm5
2423 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
2424 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6],ymm0[7]
2425 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2426 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm4[1,1],ymm8[1,1],ymm4[5,5],ymm8[5,5]
2427 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm6[1,1,1,1,5,5,5,5]
2428 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm11[1],ymm2[2,3,4],ymm11[5],ymm2[6,7]
2429 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
2430 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
2431 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm9[0,0,0,0,4,4,4,4]
2432 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
2433 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,1,0,1,4,5,4,5]
2434 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3,4],ymm2[5],ymm5[6,7]
2435 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
2436 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 48(%rax), %ymm5
2437 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3],ymm2[4,5,6,7]
2438 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0,1,2],ymm0[3,4,5,6],ymm2[7]
2439 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2440 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm4[1,1,1,1,5,5,5,5]
2441 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
2442 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm9[1],ymm2[2,3,4],ymm9[5],ymm2[6,7]
2443 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
2444 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2445 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm0[1,1],ymm14[1,1],ymm0[5,5],ymm14[5,5]
2446 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6],ymm2[7]
2447 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2448 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm3[0,0,0,0,4,4,4,4]
2449 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2450 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm1[0,1,0,1,4,5,4,5]
2451 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3,4],ymm5[5],ymm7[6,7]
2452 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,2,3]
2453 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 16(%rax), %ymm7
2454 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm7[2,3],ymm5[4,5,6,7]
2455 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm2[3,4,5,6],ymm5[7]
2456 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm2 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
2457 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[3,3,3,3]
2458 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm7 = ymm4[2],ymm9[2],ymm4[3],ymm9[3],ymm4[6],ymm9[6],ymm4[7],ymm9[7]
2459 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, %ymm0
2460 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, %ymm13
2461 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
2462 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1,2],ymm2[3,4,5,6,7]
2463 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm1[3,3],ymm3[3,3],ymm1[7,7],ymm3[7,7]
2464 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm9 = mem[2,3,2,3,6,7,6,7]
2465 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0],ymm7[1,2],ymm9[3,4],ymm7[5,6],ymm9[7]
2466 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3]
2467 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0],ymm2[1,2,3,4],ymm7[5,6,7]
2468 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm6 = ymm11[0],ymm6[0],ymm11[1],ymm6[1],ymm11[4],ymm6[4],ymm11[5],ymm6[5]
2469 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm8[3,1,2,0,7,5,6,4]
2470 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 48(%rdx), %ymm7
2471 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm7[6],ymm4[7]
2472 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5],ymm4[6,7]
2473 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2474 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
2475 ; AVX2-FAST-PERLANE-NEXT: # xmm6 = xmm1[3,3],mem[3,3]
2476 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm15[3]
2477 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm6[1,2,3],ymm4[4,5,6,7]
2478 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm6 = ymm0[0],ymm13[0],ymm0[1],ymm13[1],ymm0[4],ymm13[4],ymm0[5],ymm13[5]
2479 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm14[3,1,2,0,7,5,6,4]
2480 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%rdx), %ymm8
2481 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6],ymm7[7]
2482 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
2483 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2484 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
2485 ; AVX2-FAST-PERLANE-NEXT: # xmm7 = xmm0[3,3],mem[3,3]
2486 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1,2],xmm12[3]
2487 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3],ymm6[4,5,6,7]
2488 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2489 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 96(%rax)
2490 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 320(%rax)
2491 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 192(%rax)
2492 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 128(%rax)
2493 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm10, 352(%rax)
2494 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2495 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rax)
2496 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2497 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
2498 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2499 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
2500 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2501 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rax)
2502 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2503 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rax)
2504 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2505 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax)
2506 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2507 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 288(%rax)
2508 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2509 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 416(%rax)
2510 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2511 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 384(%rax)
2512 ; AVX2-FAST-PERLANE-NEXT: addq $488, %rsp # imm = 0x1E8
2513 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
2514 ; AVX2-FAST-PERLANE-NEXT: retq
2515 ;
2516 ; AVX512F-LABEL: store_i32_stride7_vf16:
2517 ; AVX512F: # %bb.0:
2518 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
2519 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %r10
2520 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm4
2521 ; AVX512F-NEXT: vmovdqa64 (%rsi), %zmm6
2522 ; AVX512F-NEXT: vmovdqa64 (%rdx), %zmm3
2523 ; AVX512F-NEXT: vmovdqa64 (%rcx), %zmm5
2524 ; AVX512F-NEXT: vmovdqa64 (%r8), %zmm1
2525 ; AVX512F-NEXT: vmovdqa64 (%r9), %zmm2
2526 ; AVX512F-NEXT: vmovdqa64 (%r10), %zmm0
2527 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
2528 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
2529 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
2530 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm8
2531 ; AVX512F-NEXT: movw $6192, %cx # imm = 0x1830
2532 ; AVX512F-NEXT: kmovw %ecx, %k1
2533 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm8 {%k1}
2534 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
2535 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm2, %zmm7
2536 ; AVX512F-NEXT: movw $24769, %cx # imm = 0x60C1
2537 ; AVX512F-NEXT: kmovw %ecx, %k2
2538 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm8 {%k2}
2539 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
2540 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm8, %zmm7
2541 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
2542 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm9
2543 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
2544 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm8
2545 ; AVX512F-NEXT: movw $1548, %cx # imm = 0x60C
2546 ; AVX512F-NEXT: kmovw %ecx, %k2
2547 ; AVX512F-NEXT: vmovdqa32 %zmm9, %zmm8 {%k2}
2548 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
2549 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm9
2550 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
2551 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm9, %zmm10
2552 ; AVX512F-NEXT: movw $14448, %cx # imm = 0x3870
2553 ; AVX512F-NEXT: kmovw %ecx, %k2
2554 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm8 {%k2}
2555 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
2556 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm10
2557 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
2558 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm9
2559 ; AVX512F-NEXT: movw $12384, %cx # imm = 0x3060
2560 ; AVX512F-NEXT: kmovw %ecx, %k2
2561 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
2562 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
2563 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm10
2564 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
2565 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm10, %zmm11
2566 ; AVX512F-NEXT: movw $3612, %cx # imm = 0xE1C
2567 ; AVX512F-NEXT: kmovw %ecx, %k3
2568 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm9 {%k3}
2569 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
2570 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm10
2571 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
2572 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm11
2573 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm11 {%k2}
2574 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm12 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
2575 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm12
2576 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
2577 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm12, %zmm10
2578 ; AVX512F-NEXT: movw $15480, %cx # imm = 0x3C78
2579 ; AVX512F-NEXT: kmovw %ecx, %k2
2580 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm10 {%k2}
2581 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
2582 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
2583 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm12 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
2584 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm12
2585 ; AVX512F-NEXT: movw $3096, %cx # imm = 0xC18
2586 ; AVX512F-NEXT: kmovw %ecx, %k2
2587 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm12 {%k2}
2588 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
2589 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
2590 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
2591 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm11, %zmm13
2592 ; AVX512F-NEXT: movw $28897, %cx # imm = 0x70E1
2593 ; AVX512F-NEXT: kmovw %ecx, %k2
2594 ; AVX512F-NEXT: vmovdqa32 %zmm13, %zmm12 {%k2}
2595 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
2596 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
2597 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
2598 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm6, %zmm13
2599 ; AVX512F-NEXT: movw $-31994, %cx # imm = 0x8306
2600 ; AVX512F-NEXT: kmovw %ecx, %k2
2601 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm13 {%k2}
2602 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
2603 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
2604 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm14 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
2605 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm11, %zmm14
2606 ; AVX512F-NEXT: movw $7224, %cx # imm = 0x1C38
2607 ; AVX512F-NEXT: kmovw %ecx, %k2
2608 ; AVX512F-NEXT: vmovdqa32 %zmm14, %zmm13 {%k2}
2609 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
2610 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm11
2611 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
2612 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm5, %zmm4
2613 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm4 {%k1}
2614 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
2615 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
2616 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
2617 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm3, %zmm1
2618 ; AVX512F-NEXT: movw $-30962, %cx # imm = 0x870E
2619 ; AVX512F-NEXT: kmovw %ecx, %k1
2620 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm4 {%k1}
2621 ; AVX512F-NEXT: vmovdqa64 %zmm4, 320(%rax)
2622 ; AVX512F-NEXT: vmovdqa64 %zmm13, 256(%rax)
2623 ; AVX512F-NEXT: vmovdqa64 %zmm12, 192(%rax)
2624 ; AVX512F-NEXT: vmovdqa64 %zmm10, 128(%rax)
2625 ; AVX512F-NEXT: vmovdqa64 %zmm9, 64(%rax)
2626 ; AVX512F-NEXT: vmovdqa64 %zmm8, (%rax)
2627 ; AVX512F-NEXT: vmovdqa64 %zmm7, 384(%rax)
2628 ; AVX512F-NEXT: vzeroupper
2629 ; AVX512F-NEXT: retq
2631 ; AVX512BW-LABEL: store_i32_stride7_vf16:
2632 ; AVX512BW: # %bb.0:
2633 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
2634 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
2635 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm4
2636 ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm6
2637 ; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm3
2638 ; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm5
2639 ; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm1
2640 ; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm2
2641 ; AVX512BW-NEXT: vmovdqa64 (%r10), %zmm0
2642 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
2643 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
2644 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
2645 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm8
2646 ; AVX512BW-NEXT: movw $6192, %cx # imm = 0x1830
2647 ; AVX512BW-NEXT: kmovd %ecx, %k1
2648 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm8 {%k1}
2649 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
2650 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm2, %zmm7
2651 ; AVX512BW-NEXT: movw $24769, %cx # imm = 0x60C1
2652 ; AVX512BW-NEXT: kmovd %ecx, %k2
2653 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm8 {%k2}
2654 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
2655 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm8, %zmm7
2656 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
2657 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm9
2658 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
2659 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm8
2660 ; AVX512BW-NEXT: movw $1548, %cx # imm = 0x60C
2661 ; AVX512BW-NEXT: kmovd %ecx, %k2
2662 ; AVX512BW-NEXT: vmovdqa32 %zmm9, %zmm8 {%k2}
2663 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
2664 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm9
2665 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
2666 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm9, %zmm10
2667 ; AVX512BW-NEXT: movw $14448, %cx # imm = 0x3870
2668 ; AVX512BW-NEXT: kmovd %ecx, %k2
2669 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm8 {%k2}
2670 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
2671 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm10
2672 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
2673 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm9
2674 ; AVX512BW-NEXT: movw $12384, %cx # imm = 0x3060
2675 ; AVX512BW-NEXT: kmovd %ecx, %k2
2676 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
2677 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
2678 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm10
2679 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
2680 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm10, %zmm11
2681 ; AVX512BW-NEXT: movw $3612, %cx # imm = 0xE1C
2682 ; AVX512BW-NEXT: kmovd %ecx, %k3
2683 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm9 {%k3}
2684 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
2685 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm10
2686 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
2687 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm11
2688 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm11 {%k2}
2689 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm12 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
2690 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm12
2691 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
2692 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm12, %zmm10
2693 ; AVX512BW-NEXT: movw $15480, %cx # imm = 0x3C78
2694 ; AVX512BW-NEXT: kmovd %ecx, %k2
2695 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm10 {%k2}
2696 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
2697 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
2698 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm12 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
2699 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm12
2700 ; AVX512BW-NEXT: movw $3096, %cx # imm = 0xC18
2701 ; AVX512BW-NEXT: kmovd %ecx, %k2
2702 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm12 {%k2}
2703 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
2704 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
2705 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
2706 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm11, %zmm13
2707 ; AVX512BW-NEXT: movw $28897, %cx # imm = 0x70E1
2708 ; AVX512BW-NEXT: kmovd %ecx, %k2
2709 ; AVX512BW-NEXT: vmovdqa32 %zmm13, %zmm12 {%k2}
2710 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
2711 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
2712 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
2713 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm6, %zmm13
2714 ; AVX512BW-NEXT: movw $-31994, %cx # imm = 0x8306
2715 ; AVX512BW-NEXT: kmovd %ecx, %k2
2716 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm13 {%k2}
2717 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
2718 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
2719 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm14 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
2720 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm11, %zmm14
2721 ; AVX512BW-NEXT: movw $7224, %cx # imm = 0x1C38
2722 ; AVX512BW-NEXT: kmovd %ecx, %k2
2723 ; AVX512BW-NEXT: vmovdqa32 %zmm14, %zmm13 {%k2}
2724 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
2725 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm11
2726 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
2727 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm5, %zmm4
2728 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm4 {%k1}
2729 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
2730 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
2731 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
2732 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm1
2733 ; AVX512BW-NEXT: movw $-30962, %cx # imm = 0x870E
2734 ; AVX512BW-NEXT: kmovd %ecx, %k1
2735 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm4 {%k1}
2736 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 320(%rax)
2737 ; AVX512BW-NEXT: vmovdqa64 %zmm13, 256(%rax)
2738 ; AVX512BW-NEXT: vmovdqa64 %zmm12, 192(%rax)
2739 ; AVX512BW-NEXT: vmovdqa64 %zmm10, 128(%rax)
2740 ; AVX512BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
2741 ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%rax)
2742 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 384(%rax)
2743 ; AVX512BW-NEXT: vzeroupper
2744 ; AVX512BW-NEXT: retq
2745 %in.vec0 = load <16 x i32>, ptr %in.vecptr0, align 64
2746 %in.vec1 = load <16 x i32>, ptr %in.vecptr1, align 64
2747 %in.vec2 = load <16 x i32>, ptr %in.vecptr2, align 64
2748 %in.vec3 = load <16 x i32>, ptr %in.vecptr3, align 64
2749 %in.vec4 = load <16 x i32>, ptr %in.vecptr4, align 64
2750 %in.vec5 = load <16 x i32>, ptr %in.vecptr5, align 64
2751 %in.vec6 = load <16 x i32>, ptr %in.vecptr6, align 64
2752 %1 = shufflevector <16 x i32> %in.vec0, <16 x i32> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2753 %2 = shufflevector <16 x i32> %in.vec2, <16 x i32> %in.vec3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2754 %3 = shufflevector <16 x i32> %in.vec4, <16 x i32> %in.vec5, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2755 %4 = shufflevector <32 x i32> %1, <32 x i32> %2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
2756 %5 = shufflevector <16 x i32> %in.vec6, <16 x i32> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2757 %6 = shufflevector <32 x i32> %3, <32 x i32> %5, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
2758 %7 = shufflevector <48 x i32> %6, <48 x i32> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2759 %8 = shufflevector <64 x i32> %4, <64 x i32> %7, <112 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111>
2760 %interleaved.vec = shufflevector <112 x i32> %8, <112 x i32> poison, <112 x i32> <i32 0, i32 16, i32 32, i32 48, i32 64, i32 80, i32 96, i32 1, i32 17, i32 33, i32 49, i32 65, i32 81, i32 97, i32 2, i32 18, i32 34, i32 50, i32 66, i32 82, i32 98, i32 3, i32 19, i32 35, i32 51, i32 67, i32 83, i32 99, i32 4, i32 20, i32 36, i32 52, i32 68, i32 84, i32 100, i32 5, i32 21, i32 37, i32 53, i32 69, i32 85, i32 101, i32 6, i32 22, i32 38, i32 54, i32 70, i32 86, i32 102, i32 7, i32 23, i32 39, i32 55, i32 71, i32 87, i32 103, i32 8, i32 24, i32 40, i32 56, i32 72, i32 88, i32 104, i32 9, i32 25, i32 41, i32 57, i32 73, i32 89, i32 105, i32 10, i32 26, i32 42, i32 58, i32 74, i32 90, i32 106, i32 11, i32 27, i32 43, i32 59, i32 75, i32 91, i32 107, i32 12, i32 28, i32 44, i32 60, i32 76, i32 92, i32 108, i32 13, i32 29, i32 45, i32 61, i32 77, i32 93, i32 109, i32 14, i32 30, i32 46, i32 62, i32 78, i32 94, i32 110, i32 15, i32 31, i32 47, i32 63, i32 79, i32 95, i32 111>
2761 store <112 x i32> %interleaved.vec, ptr %out.vec, align 64
2762 ret void
2763 }
2765 define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
2766 ; SSE-LABEL: store_i32_stride7_vf32:
2767 ; SSE: # %bb.0:
2768 ; SSE-NEXT: subq $1256, %rsp # imm = 0x4E8
2769 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2770 ; SSE-NEXT: movdqa (%rdi), %xmm8
2771 ; SSE-NEXT: movdqa (%rsi), %xmm10
2772 ; SSE-NEXT: movdqa 16(%rsi), %xmm4
2773 ; SSE-NEXT: movaps (%rdx), %xmm14
2774 ; SSE-NEXT: movdqa 16(%rdx), %xmm7
2775 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2776 ; SSE-NEXT: movaps (%rcx), %xmm13
2777 ; SSE-NEXT: movaps 16(%rcx), %xmm9
2778 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2779 ; SSE-NEXT: movaps (%r8), %xmm0
2780 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2781 ; SSE-NEXT: movaps 16(%r8), %xmm11
2782 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2783 ; SSE-NEXT: movdqa (%r9), %xmm15
2784 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2785 ; SSE-NEXT: movdqa 16(%r9), %xmm12
2786 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2787 ; SSE-NEXT: movdqa (%rax), %xmm2
2788 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2789 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm13[1,1]
2790 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[1,1,1,1]
2791 ; SSE-NEXT: movaps %xmm14, %xmm3
2792 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
2793 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
2794 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2795 ; SSE-NEXT: movdqa %xmm8, %xmm0
2796 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3]
2797 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2798 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
2799 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm15[1,1,1,1]
2800 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2801 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
2802 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2803 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
2804 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2805 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
2806 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2807 ; SSE-NEXT: movaps %xmm11, %xmm0
2808 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm9[1,1]
2809 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2810 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2811 ; SSE-NEXT: movdqa 16(%rax), %xmm0
2812 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2813 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2814 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[1,1,1,1]
2815 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2816 ; SSE-NEXT: movdqa 16(%rdi), %xmm15
2817 ; SSE-NEXT: movdqa %xmm15, %xmm0
2818 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
2819 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2820 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2821 ; SSE-NEXT: movdqa 32(%rsi), %xmm1
2822 ; SSE-NEXT: movaps 32(%rdx), %xmm5
2823 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2824 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
2825 ; SSE-NEXT: movdqa %xmm1, %xmm2
2826 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2827 ; SSE-NEXT: movaps %xmm5, %xmm1
2828 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
2829 ; SSE-NEXT: movaps 32(%rcx), %xmm5
2830 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2831 ; SSE-NEXT: movaps 32(%r8), %xmm0
2832 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2833 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm5[1,1]
2834 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2835 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2836 ; SSE-NEXT: movdqa 32(%r9), %xmm1
2837 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2838 ; SSE-NEXT: movdqa 32(%rax), %xmm0
2839 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2840 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2841 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
2842 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2843 ; SSE-NEXT: movdqa 32(%rdi), %xmm0
2844 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2845 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2846 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2847 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2848 ; SSE-NEXT: movdqa 48(%rsi), %xmm3
2849 ; SSE-NEXT: movdqa 48(%rdx), %xmm0
2850 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2851 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2852 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
2853 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2854 ; SSE-NEXT: movaps 48(%rcx), %xmm7
2855 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2856 ; SSE-NEXT: movaps 48(%r8), %xmm0
2857 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2858 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm7[1,1]
2859 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2860 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2861 ; SSE-NEXT: movdqa 48(%r9), %xmm1
2862 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2863 ; SSE-NEXT: movdqa 48(%rax), %xmm0
2864 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2865 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2866 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
2867 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2868 ; SSE-NEXT: movdqa 48(%rdi), %xmm11
2869 ; SSE-NEXT: movdqa %xmm11, %xmm0
2870 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
2871 ; SSE-NEXT: movdqa %xmm3, %xmm7
2872 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2873 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2874 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2875 ; SSE-NEXT: movdqa 64(%rsi), %xmm1
2876 ; SSE-NEXT: movaps 64(%rdx), %xmm3
2877 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2878 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
2879 ; SSE-NEXT: movdqa %xmm1, %xmm12
2880 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2881 ; SSE-NEXT: movaps %xmm3, %xmm1
2882 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
2883 ; SSE-NEXT: movaps 64(%rcx), %xmm3
2884 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2885 ; SSE-NEXT: movaps 64(%r8), %xmm0
2886 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2887 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm3[1,1]
2888 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2889 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2890 ; SSE-NEXT: movdqa 64(%r9), %xmm1
2891 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2892 ; SSE-NEXT: movdqa 64(%rax), %xmm0
2893 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2894 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2895 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
2896 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2897 ; SSE-NEXT: movdqa 64(%rdi), %xmm9
2898 ; SSE-NEXT: movdqa %xmm9, %xmm0
2899 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3]
2900 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2901 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2902 ; SSE-NEXT: movdqa 80(%rsi), %xmm3
2903 ; SSE-NEXT: movdqa 80(%rdx), %xmm0
2904 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2905 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2906 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
2907 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2908 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2909 ; SSE-NEXT: movaps 80(%rcx), %xmm2
2910 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2911 ; SSE-NEXT: movaps 80(%r8), %xmm0
2912 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2913 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
2914 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2915 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2916 ; SSE-NEXT: movdqa 80(%r9), %xmm1
2917 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2918 ; SSE-NEXT: movdqa 80(%rax), %xmm0
2919 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2920 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2921 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
2922 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2923 ; SSE-NEXT: movdqa 80(%rdi), %xmm0
2924 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2925 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
2926 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2927 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2928 ; SSE-NEXT: movdqa 96(%rsi), %xmm6
2929 ; SSE-NEXT: movaps 96(%rdx), %xmm2
2930 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2931 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
2932 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2933 ; SSE-NEXT: movaps %xmm2, %xmm1
2934 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
2935 ; SSE-NEXT: movaps 96(%rcx), %xmm3
2936 ; SSE-NEXT: movaps 96(%r8), %xmm4
2937 ; SSE-NEXT: movaps %xmm4, %xmm0
2938 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2939 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm3[1,1]
2940 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2941 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2942 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2943 ; SSE-NEXT: movdqa 96(%r9), %xmm2
2944 ; SSE-NEXT: movdqa 96(%rax), %xmm1
2945 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
2946 ; SSE-NEXT: movdqa %xmm1, %xmm12
2947 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2948 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
2949 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2950 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2951 ; SSE-NEXT: movdqa 96(%rdi), %xmm0
2952 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2953 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
2954 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2955 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2956 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,3,3,3]
2957 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[3,3,3,3]
2958 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2959 ; SSE-NEXT: movdqa %xmm12, %xmm0
2960 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm2[3,3]
2961 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2962 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2963 ; SSE-NEXT: movdqa 112(%rsi), %xmm1
2964 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2965 ; SSE-NEXT: movdqa 112(%rdx), %xmm0
2966 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2967 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2968 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
2969 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2970 ; SSE-NEXT: movaps 112(%rcx), %xmm2
2971 ; SSE-NEXT: movaps 112(%r8), %xmm0
2972 ; SSE-NEXT: movaps %xmm0, %xmm1
2973 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
2974 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,0]
2975 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2976 ; SSE-NEXT: movaps %xmm0, %xmm1
2977 ; SSE-NEXT: movaps 112(%r9), %xmm12
2978 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm12[0]
2979 ; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
2980 ; SSE-NEXT: movaps %xmm12, %xmm1
2981 ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2982 ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm0[1]
2983 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
2984 ; SSE-NEXT: movaps %xmm2, %xmm3
2985 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
2986 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2987 ; SSE-NEXT: movaps 112(%rax), %xmm0
2988 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2989 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
2990 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
2991 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2992 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2993 ; SSE-NEXT: movaps %xmm14, %xmm0
2994 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2995 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
2996 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2997 ; SSE-NEXT: movdqa %xmm8, %xmm1
2998 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
2999 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
3000 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3001 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3002 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm8[1,3]
3003 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3004 ; SSE-NEXT: movaps %xmm2, %xmm1
3005 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3006 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
3007 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
3008 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3009 ; SSE-NEXT: movaps %xmm2, %xmm0
3010 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
3011 ; SSE-NEXT: movaps %xmm14, %xmm1
3012 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm13[2],xmm1[3],xmm13[3]
3013 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
3014 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3015 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3016 ; SSE-NEXT: movaps %xmm1, %xmm0
3017 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3018 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
3019 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3020 ; SSE-NEXT: movdqa %xmm15, %xmm2
3021 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
3022 ; SSE-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
3023 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
3024 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3025 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
3026 ; SSE-NEXT: # xmm15 = xmm15[1,1],mem[0,3]
3027 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3028 ; SSE-NEXT: movaps %xmm2, %xmm4
3029 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3030 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
3031 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm15[2,0]
3032 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3033 ; SSE-NEXT: movaps %xmm6, %xmm4
3034 ; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
3035 ; SSE-NEXT: movaps %xmm2, %xmm0
3036 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
3037 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
3038 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3039 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3040 ; SSE-NEXT: movaps %xmm2, %xmm0
3041 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3042 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
3043 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3044 ; SSE-NEXT: movaps %xmm5, %xmm4
3045 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
3046 ; SSE-NEXT: # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1]
3047 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
3048 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3049 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3050 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm5[1,3]
3051 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3052 ; SSE-NEXT: movaps %xmm5, %xmm6
3053 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3054 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
3055 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[0,2]
3056 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3057 ; SSE-NEXT: movaps %xmm5, %xmm0
3058 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
3059 ; SSE-NEXT: movaps %xmm2, %xmm4
3060 ; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
3061 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
3062 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3063 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3064 ; SSE-NEXT: movaps %xmm1, %xmm0
3065 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3066 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
3067 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3068 ; SSE-NEXT: movdqa %xmm11, %xmm4
3069 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
3070 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[2,0]
3071 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3072 ; SSE-NEXT: movdqa %xmm11, %xmm0
3073 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3074 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
3075 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3076 ; SSE-NEXT: movaps %xmm6, %xmm5
3077 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3078 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
3079 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
3080 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3081 ; SSE-NEXT: movaps %xmm2, %xmm5
3082 ; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
3083 ; SSE-NEXT: movaps %xmm6, %xmm0
3084 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
3085 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
3086 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3087 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3088 ; SSE-NEXT: movdqa %xmm2, %xmm0
3089 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3090 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
3091 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3092 ; SSE-NEXT: movdqa %xmm9, %xmm4
3093 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
3094 ; SSE-NEXT: # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1]
3095 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
3096 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3097 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3098 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm9[1,3]
3099 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3100 ; SSE-NEXT: movaps %xmm6, %xmm5
3101 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3102 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
3103 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[0,2]
3104 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3105 ; SSE-NEXT: movaps %xmm6, %xmm0
3106 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
3107 ; SSE-NEXT: movdqa %xmm2, %xmm4
3108 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
3109 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
3110 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3111 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3112 ; SSE-NEXT: movaps %xmm4, %xmm0
3113 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
3114 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm11[0]
3115 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3116 ; SSE-NEXT: movaps %xmm1, %xmm15
3117 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
3118 ; SSE-NEXT: # xmm15 = xmm15[0],mem[0],xmm15[1],mem[1]
3119 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm0[2,0]
3120 ; SSE-NEXT: movaps %xmm1, %xmm0
3121 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3122 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
3123 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3124 ; SSE-NEXT: movaps %xmm1, %xmm13
3125 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3126 ; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1]
3127 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm0[2,0]
3128 ; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm4[2],xmm11[3],xmm4[3]
3129 ; SSE-NEXT: movaps %xmm1, %xmm0
3130 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
3131 ; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm0[0]
3132 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
3133 ; SSE-NEXT: movaps %xmm8, %xmm0
3134 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3135 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
3136 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
3137 ; SSE-NEXT: movaps %xmm14, %xmm10
3138 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
3139 ; SSE-NEXT: # xmm10 = xmm10[0],mem[0],xmm10[1],mem[1]
3140 ; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm0[0]
3141 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3142 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm14[1,3]
3143 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3144 ; SSE-NEXT: movaps %xmm1, %xmm9
3145 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3146 ; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
3147 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm0[0,2]
3148 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
3149 ; SSE-NEXT: unpckhps {{.*#+}} xmm8 = xmm8[2],xmm4[2],xmm8[3],xmm4[3]
3150 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm1[0]
3151 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3152 ; SSE-NEXT: movaps %xmm4, %xmm7
3153 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm3[2],xmm7[3],xmm3[3]
3154 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
3155 ; SSE-NEXT: movaps 112(%rdi), %xmm5
3156 ; SSE-NEXT: movaps %xmm5, %xmm6
3157 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3158 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
3159 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm3[2,0]
3160 ; SSE-NEXT: movaps %xmm5, %xmm0
3161 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3162 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
3163 ; SSE-NEXT: movaps (%rsp), %xmm3 # 16-byte Reload
3164 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm0[2,0]
3165 ; SSE-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill
3166 ; SSE-NEXT: movaps %xmm5, %xmm0
3167 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
3168 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3169 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[1,1]
3170 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm0[0,1]
3171 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3172 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm12[2,0]
3173 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,3],xmm2[3,3]
3174 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,1],xmm1[2,3]
3175 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm4[2,0]
3176 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3177 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3178 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3179 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3180 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
3181 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
3182 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,3,2,3]
3183 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
3184 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3185 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
3186 ; SSE-NEXT: # xmm12 = xmm12[3,3],mem[3,3]
3187 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3188 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm12[2,0]
3189 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3190 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3191 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
3192 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3193 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3194 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3195 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3196 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3197 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[2,0]
3198 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3199 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3200 ; SSE-NEXT: movss {{.*#+}} xmm4 = xmm0[0],xmm4[1,2,3]
3201 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3202 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3203 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3204 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,0]
3205 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3206 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3207 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3208 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3209 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3210 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3211 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3212 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3213 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[2,0]
3214 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3215 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3216 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3217 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3218 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3219 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3220 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3221 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,0]
3222 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3223 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3224 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3225 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3226 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3227 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3228 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3229 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3230 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[2,0]
3231 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3232 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3233 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3234 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3235 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3236 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3237 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3238 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,0]
3239 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3240 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3241 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3242 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3243 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3244 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3245 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3246 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
3247 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,3],xmm0[2,0]
3248 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3249 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3250 ; SSE-NEXT: movss {{.*#+}} xmm12 = xmm0[0],xmm12[1,2,3]
3251 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3252 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3253 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3254 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[2,0]
3255 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3256 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3257 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm0[0],xmm3[1,2,3]
3258 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3259 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3260 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3261 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3262 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[2,0]
3263 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3264 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3265 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3266 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3267 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3268 ; SSE-NEXT: movaps %xmm1, %xmm0
3269 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3270 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
3271 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3272 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3273 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
3274 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3275 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3276 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3277 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm0[2,0]
3278 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3279 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
3280 ; SSE-NEXT: movss {{.*#+}} xmm14 = xmm0[0],xmm14[1,2,3]
3281 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
3282 ; SSE-NEXT: movaps %xmm5, 864(%rax)
3283 ; SSE-NEXT: movaps %xmm7, 848(%rax)
3284 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3285 ; SSE-NEXT: movaps %xmm0, 832(%rax)
3286 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
3287 ; SSE-NEXT: movaps %xmm0, 800(%rax)
3288 ; SSE-NEXT: movaps %xmm6, 784(%rax)
3289 ; SSE-NEXT: movaps %xmm8, 736(%rax)
3290 ; SSE-NEXT: movaps %xmm9, 688(%rax)
3291 ; SSE-NEXT: movaps %xmm10, 672(%rax)
3292 ; SSE-NEXT: movaps %xmm11, 624(%rax)
3293 ; SSE-NEXT: movaps %xmm13, 576(%rax)
3294 ; SSE-NEXT: movaps %xmm15, 560(%rax)
3295 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3296 ; SSE-NEXT: movaps %xmm0, 512(%rax)
3297 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3298 ; SSE-NEXT: movaps %xmm0, 464(%rax)
3299 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3300 ; SSE-NEXT: movaps %xmm0, 448(%rax)
3301 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3302 ; SSE-NEXT: movaps %xmm0, 400(%rax)
3303 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3304 ; SSE-NEXT: movaps %xmm0, 352(%rax)
3305 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3306 ; SSE-NEXT: movaps %xmm0, 336(%rax)
3307 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3308 ; SSE-NEXT: movaps %xmm0, 288(%rax)
3309 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3310 ; SSE-NEXT: movaps %xmm0, 240(%rax)
3311 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3312 ; SSE-NEXT: movaps %xmm0, 224(%rax)
3313 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3314 ; SSE-NEXT: movaps %xmm0, 176(%rax)
3315 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3316 ; SSE-NEXT: movaps %xmm0, 128(%rax)
3317 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3318 ; SSE-NEXT: movaps %xmm0, 112(%rax)
3319 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3320 ; SSE-NEXT: movaps %xmm0, 64(%rax)
3321 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3322 ; SSE-NEXT: movaps %xmm0, 16(%rax)
3323 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3324 ; SSE-NEXT: movaps %xmm0, (%rax)
3325 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3326 ; SSE-NEXT: movaps %xmm0, 880(%rax)
3327 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3328 ; SSE-NEXT: movaps %xmm0, 816(%rax)
3329 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3330 ; SSE-NEXT: movaps %xmm0, 768(%rax)
3331 ; SSE-NEXT: movaps %xmm14, 752(%rax)
3332 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3333 ; SSE-NEXT: movaps %xmm0, 720(%rax)
3334 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3335 ; SSE-NEXT: movaps %xmm0, 704(%rax)
3336 ; SSE-NEXT: movaps %xmm1, 656(%rax)
3337 ; SSE-NEXT: movaps %xmm2, 640(%rax)
3338 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3339 ; SSE-NEXT: movaps %xmm0, 608(%rax)
3340 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3341 ; SSE-NEXT: movaps %xmm0, 592(%rax)
3342 ; SSE-NEXT: movaps %xmm3, 544(%rax)
3343 ; SSE-NEXT: movaps %xmm12, 528(%rax)
3344 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3345 ; SSE-NEXT: movaps %xmm0, 496(%rax)
3346 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3347 ; SSE-NEXT: movaps %xmm0, 480(%rax)
3348 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3349 ; SSE-NEXT: movaps %xmm0, 432(%rax)
3350 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3351 ; SSE-NEXT: movaps %xmm0, 416(%rax)
3352 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3353 ; SSE-NEXT: movaps %xmm0, 384(%rax)
3354 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3355 ; SSE-NEXT: movaps %xmm0, 368(%rax)
3356 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3357 ; SSE-NEXT: movaps %xmm0, 320(%rax)
3358 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3359 ; SSE-NEXT: movaps %xmm0, 304(%rax)
3360 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3361 ; SSE-NEXT: movaps %xmm0, 272(%rax)
3362 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3363 ; SSE-NEXT: movaps %xmm0, 256(%rax)
3364 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3365 ; SSE-NEXT: movaps %xmm0, 208(%rax)
3366 ; SSE-NEXT: movaps %xmm4, 192(%rax)
3367 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3368 ; SSE-NEXT: movaps %xmm0, 160(%rax)
3369 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3370 ; SSE-NEXT: movaps %xmm0, 144(%rax)
3371 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3372 ; SSE-NEXT: movaps %xmm0, 96(%rax)
3373 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3374 ; SSE-NEXT: movaps %xmm0, 80(%rax)
3375 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3376 ; SSE-NEXT: movaps %xmm0, 48(%rax)
3377 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3378 ; SSE-NEXT: movaps %xmm0, 32(%rax)
3379 ; SSE-NEXT: addq $1256, %rsp # imm = 0x4E8
3380 ; SSE-NEXT: retq
3382 ; AVX1-ONLY-LABEL: store_i32_stride7_vf32:
3383 ; AVX1-ONLY: # %bb.0:
3384 ; AVX1-ONLY-NEXT: subq $1624, %rsp # imm = 0x658
3385 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
3386 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm4
3387 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3388 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %ymm3
3389 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3390 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %ymm0
3391 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3392 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %ymm1
3393 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3394 ; AVX1-ONLY-NEXT: vmovaps 96(%rax), %ymm2
3395 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3396 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
3397 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm3[1],ymm4[1],ymm3[3],ymm4[3]
3398 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,0],ymm4[4,5],ymm1[6,4]
3399 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
3400 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
3401 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
3402 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
3403 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
3404 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
3405 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
3406 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3407 ; AVX1-ONLY-NEXT: vmovaps (%rax), %xmm0
3408 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3409 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3410 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm3
3411 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3412 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm4
3413 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3414 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm2
3415 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm5
3416 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm5[0],xmm2[0]
3417 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,1]
3418 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm6
3419 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3420 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3421 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm8
3422 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm9
3423 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
3424 ; AVX1-ONLY-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3425 ; AVX1-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3426 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,0,1]
3427 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
3428 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
3429 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
3430 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[2],ymm0[2]
3431 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6],ymm1[7]
3432 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3433 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm3[1,1],xmm4[1,1]
3434 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3435 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
3436 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3437 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm5[1]
3438 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1],xmm1[0,2]
3439 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3440 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm8[1],zero
3441 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
3442 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3443 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3444 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm1
3445 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3446 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm0
3447 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3448 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
3449 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3450 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm2
3451 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3452 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm1
3453 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3454 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
3455 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
3456 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm2
3457 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3458 ; AVX1-ONLY-NEXT: vmovaps (%r9), %ymm8
3459 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm8[0],ymm2[0],ymm8[2],ymm2[2]
3460 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
3461 ; AVX1-ONLY-NEXT: vmovaps (%rax), %ymm2
3462 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3463 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
3464 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3465 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
3466 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
3467 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3468 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1
3469 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm7
3470 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm7[0],xmm1[0]
3471 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
3472 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm5
3473 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3474 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3475 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %xmm3
3476 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %xmm2
3477 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
3478 ; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm6
3479 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3480 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm9
3481 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3482 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
3483 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
3484 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %xmm3
3485 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3486 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %xmm4
3487 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3488 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
3489 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3490 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %xmm2
3491 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3492 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
3493 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
3494 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
3495 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3496 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
3497 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3498 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
3499 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3500 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm7[1]
3501 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm7[1,1],xmm1[0,2]
3502 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3503 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm6[1],zero
3504 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
3505 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3506 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3507 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm0
3508 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3509 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %ymm1
3510 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3511 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,1],ymm0[1,1],ymm1[5,5],ymm0[5,5]
3512 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3513 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %ymm2
3514 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3515 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %ymm1
3516 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3517 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
3518 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
3519 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %ymm2
3520 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3521 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %ymm1
3522 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3523 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
3524 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
3525 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %ymm2
3526 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3527 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
3528 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3529 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
3530 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
3531 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3532 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm1
3533 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %xmm7
3534 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm7[0],xmm1[0]
3535 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
3536 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm6
3537 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3538 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3539 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %xmm4
3540 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %xmm2
3541 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
3542 ; AVX1-ONLY-NEXT: vmovaps %xmm4, %xmm9
3543 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3544 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm10
3545 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3546 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
3547 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
3548 ; AVX1-ONLY-NEXT: vmovaps 64(%r9), %xmm4
3549 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3550 ; AVX1-ONLY-NEXT: vmovaps 64(%r8), %xmm5
3551 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3552 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
3553 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3554 ; AVX1-ONLY-NEXT: vmovaps 64(%rax), %xmm2
3555 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3556 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
3557 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
3558 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
3559 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3560 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm4[1,1],xmm5[1,1]
3561 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3562 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
3563 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3564 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm7[1]
3565 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm7[1,1],xmm1[0,2]
3566 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3567 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm10[1],xmm9[1],zero
3568 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
3569 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3570 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3571 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm14
3572 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %ymm0
3573 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3574 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm14[1,1],ymm0[5,5],ymm14[5,5]
3575 ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3576 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3577 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %ymm2
3578 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %ymm9
3579 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm9[1,1],ymm2[5,5],ymm9[5,5]
3580 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3581 ; AVX1-ONLY-NEXT: vmovaps %ymm2, %ymm13
3582 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
3583 ; AVX1-ONLY-NEXT: vmovaps 64(%r8), %ymm7
3584 ; AVX1-ONLY-NEXT: vmovaps 64(%r9), %ymm10
3585 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm10[0],ymm7[0],ymm10[2],ymm7[2]
3586 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm7[2,1],ymm1[6,4],ymm7[6,5]
3587 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3588 ; AVX1-ONLY-NEXT: vmovaps 64(%rax), %ymm2
3589 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3590 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
3591 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3592 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
3593 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
3594 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3595 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm6
3596 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %xmm2
3597 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm2[0],xmm6[0]
3598 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[2,1]
3599 ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3600 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm11
3601 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %xmm4
3602 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %xmm3
3603 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm12 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
3604 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3605 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3606 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm12[0,1,0,1]
3607 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
3608 ; AVX1-ONLY-NEXT: vmovaps 96(%r9), %xmm5
3609 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3610 ; AVX1-ONLY-NEXT: vmovaps 96(%r8), %xmm11
3611 ; AVX1-ONLY-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3612 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm15 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
3613 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
3614 ; AVX1-ONLY-NEXT: vinsertf128 $1, 96(%rax), %ymm0, %ymm0
3615 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm15[0],ymm0[0],ymm15[2],ymm0[2]
3616 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6],ymm1[7]
3617 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3618 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1],xmm11[1,1]
3619 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3620 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
3621 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3622 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm2[1]
3623 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,1],xmm1[0,2]
3624 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3625 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm15 = zero,xmm3[1],xmm4[1],zero
3626 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1,2],ymm1[3,4,5,6,7]
3627 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3628 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3629 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3630 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
3631 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm5[1],ymm11[1],ymm5[3],ymm11[3]
3632 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
3633 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
3634 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3635 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm12[2],ymm4[2],ymm12[3],ymm4[3],ymm12[6],ymm4[6],ymm12[7],ymm4[7]
3636 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
3637 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3638 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3639 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm8[1],ymm1[3],ymm8[3]
3640 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,1],ymm1[0,2],ymm8[5,5],ymm1[4,6]
3641 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3642 ; AVX1-ONLY-NEXT: vmovaps 16(%rax), %xmm15
3643 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
3644 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
3645 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3646 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3647 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
3648 ; AVX1-ONLY-NEXT: # ymm0 = ymm1[1],mem[1],ymm1[3],mem[3]
3649 ; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm6
3650 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
3651 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3652 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3653 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
3654 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
3655 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3656 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3657 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
3658 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
3659 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3660 ; AVX1-ONLY-NEXT: vmovaps 48(%rax), %xmm15
3661 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
3662 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
3663 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3664 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3665 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm14[1],ymm0[3],ymm14[3]
3666 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
3667 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm13[2],ymm9[2],ymm13[3],ymm9[3],ymm13[6],ymm9[6],ymm13[7],ymm9[7]
3668 ; AVX1-ONLY-NEXT: vmovaps %ymm13, %ymm14
3669 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
3670 ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3671 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm7[1],ymm10[1],ymm7[3],ymm10[3]
3672 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[1,1],ymm1[0,2],ymm10[5,5],ymm1[4,6]
3673 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3674 ; AVX1-ONLY-NEXT: vmovaps 80(%rax), %xmm15
3675 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
3676 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
3677 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3678 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3679 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
3680 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm3[3,3],ymm2[3,3],ymm3[7,7],ymm2[7,7]
3681 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3682 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
3683 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
3684 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm8[3,3],ymm15[3,3],ymm8[7,7],ymm15[7,7]
3685 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3686 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3687 ; AVX1-ONLY-NEXT: vbroadcastss 124(%r8), %ymm1
3688 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
3689 ; AVX1-ONLY-NEXT: vbroadcastss 124(%r9), %ymm1
3690 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
3691 ; AVX1-ONLY-NEXT: vbroadcastsd 120(%rax), %ymm1
3692 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
3693 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3694 ; AVX1-ONLY-NEXT: vmovaps %ymm3, %ymm1
3695 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
3696 ; AVX1-ONLY-NEXT: vmovaps %ymm2, %ymm3
3697 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,1],ymm0[0,2],ymm1[7,5],ymm0[4,6]
3698 ; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm2
3699 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm8[0],ymm15[0],ymm8[1],ymm15[1],ymm8[4],ymm15[4],ymm8[5],ymm15[5]
3700 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
3701 ; AVX1-ONLY-NEXT: vbroadcastss 108(%r8), %ymm1
3702 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
3703 ; AVX1-ONLY-NEXT: vbroadcastss 108(%r9), %xmm1
3704 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
3705 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3706 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
3707 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3708 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm3[1,1],ymm2[1,1],ymm3[5,5],ymm2[5,5]
3709 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm15[1,1],ymm8[1,1],ymm15[5,5],ymm8[5,5]
3710 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3711 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
3712 ; AVX1-ONLY-NEXT: vbroadcastsd 112(%r8), %ymm1
3713 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
3714 ; AVX1-ONLY-NEXT: vbroadcastss 112(%r9), %xmm1
3715 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
3716 ; AVX1-ONLY-NEXT: vbroadcastss 112(%rax), %ymm1
3717 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
3718 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3719 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3720 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3721 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3722 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3723 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
3724 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
3725 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
3726 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3727 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
3728 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3729 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
3730 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
3731 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
3732 ; AVX1-ONLY-NEXT: vbroadcastsd 8(%rax), %ymm15
3733 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
3734 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
3735 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3736 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm4[3,3],ymm12[3,3],ymm4[7,7],ymm12[7,7]
3737 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3738 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm11[3,3],ymm5[3,3],ymm11[7,7],ymm5[7,7]
3739 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3740 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3741 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3742 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3743 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
3744 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3745 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
3746 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3747 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
3748 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
3749 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3750 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3751 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3752 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3753 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3754 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
3755 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
3756 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
3757 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3758 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
3759 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3760 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
3761 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3762 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1,2],xmm1[3]
3763 ; AVX1-ONLY-NEXT: vbroadcastsd 40(%rax), %ymm15
3764 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
3765 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
3766 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3767 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
3768 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
3769 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm11[3,3],ymm12[3,3],ymm11[7,7],ymm12[7,7]
3770 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3771 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3772 ; AVX1-ONLY-NEXT: vmovaps %ymm6, %ymm3
3773 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[3,3],ymm6[3,3],ymm10[7,7],ymm6[7,7]
3774 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3775 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3776 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3777 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3778 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
3779 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3780 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
3781 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3782 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
3783 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
3784 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3785 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3786 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3787 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3788 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3789 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
3790 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
3791 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
3792 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3793 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
3794 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
3795 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm9[2,2,2,2]
3796 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
3797 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3]
3798 ; AVX1-ONLY-NEXT: vbroadcastsd 72(%rax), %ymm6
3799 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
3800 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
3801 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3802 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3803 ; AVX1-ONLY-NEXT: vmovaps %ymm14, %ymm7
3804 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm6[3,3],ymm14[3,3],ymm6[7,7],ymm14[7,7]
3805 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3806 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3807 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
3808 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[3,3],ymm14[3,3],ymm4[7,7],ymm14[7,7]
3809 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3810 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3811 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3812 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3813 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
3814 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3815 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
3816 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3817 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
3818 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
3819 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3820 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3821 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3822 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3823 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
3824 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
3825 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
3826 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3827 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
3828 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3829 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
3830 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
3831 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
3832 ; AVX1-ONLY-NEXT: vbroadcastsd 104(%rax), %ymm2
3833 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
3834 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
3835 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3836 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm5[0],ymm1[1],ymm5[1],ymm1[4],ymm5[4],ymm1[5],ymm5[5]
3837 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3838 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3839 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm5[0],ymm2[2],ymm5[2]
3840 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm5[3,1],ymm2[0,2],ymm5[7,5],ymm2[4,6]
3841 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
3842 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3843 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
3844 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[3,3],mem[3,3]
3845 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
3846 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0,1,2],mem[3]
3847 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4,5,6,7]
3848 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm2 = ymm10[0],ymm3[0],ymm10[1],ymm3[1],ymm10[4],ymm3[4],ymm10[5],ymm3[5]
3849 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
3850 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm11[3,1],ymm3[0,2],ymm11[7,5],ymm3[4,6]
3851 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
3852 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm3 # 16-byte Folded Reload
3853 ; AVX1-ONLY-NEXT: # xmm3 = xmm13[3,3],mem[3,3]
3854 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
3855 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
3856 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3],ymm2[4,5,6,7]
3857 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm3 = ymm4[0],ymm14[0],ymm4[1],ymm14[1],ymm4[4],ymm14[4],ymm4[5],ymm14[5]
3858 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm7[0],ymm6[0],ymm7[2],ymm6[2]
3859 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm6[3,1],ymm5[0,2],ymm6[7,5],ymm5[4,6]
3860 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5],ymm5[6,7]
3861 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm8[3,3],xmm9[3,3]
3862 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
3863 ; AVX1-ONLY-NEXT: # xmm5 = xmm5[0,1,2],mem[3]
3864 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1,2,3],ymm3[4,5,6,7]
3865 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
3866 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 544(%rax)
3867 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 320(%rax)
3868 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rax)
3869 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 736(%rax)
3870 ; AVX1-ONLY-NEXT: vmovaps %ymm15, 640(%rax)
3871 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3872 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 608(%rax)
3873 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3874 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 512(%rax)
3875 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3876 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rax)
3877 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3878 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rax)
3879 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3880 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rax)
3881 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3882 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rax)
3883 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3884 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rax)
3885 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3886 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rax)
3887 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3888 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 800(%rax)
3889 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3890 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 768(%rax)
3891 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3892 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 704(%rax)
3893 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3894 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 672(%rax)
3895 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3896 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 576(%rax)
3897 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3898 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rax)
3899 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3900 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rax)
3901 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3902 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rax)
3903 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3904 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rax)
3905 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3906 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
3907 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3908 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
3909 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3910 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
3911 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3912 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
3913 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3914 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 864(%rax)
3915 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3916 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 832(%rax)
3917 ; AVX1-ONLY-NEXT: addq $1624, %rsp # imm = 0x658
3918 ; AVX1-ONLY-NEXT: vzeroupper
3919 ; AVX1-ONLY-NEXT: retq
3921 ; AVX2-SLOW-LABEL: store_i32_stride7_vf32:
3922 ; AVX2-SLOW: # %bb.0:
3923 ; AVX2-SLOW-NEXT: subq $1320, %rsp # imm = 0x528
3924 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
3925 ; AVX2-SLOW-NEXT: vmovaps (%rax), %xmm0
3926 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3927 ; AVX2-SLOW-NEXT: vmovaps 32(%rax), %xmm3
3928 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3929 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3930 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm14
3931 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %xmm7
3932 ; AVX2-SLOW-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3933 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm10
3934 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %xmm9
3935 ; AVX2-SLOW-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3936 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm10[1,1,1,1]
3937 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
3938 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
3939 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
3940 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm12
3941 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %xmm4
3942 ; AVX2-SLOW-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3943 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm8
3944 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm8[1],xmm12[1],zero
3945 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm6
3946 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %xmm11
3947 ; AVX2-SLOW-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3948 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %xmm5
3949 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %xmm13
3950 ; AVX2-SLOW-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3951 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm5[1,1,2,2]
3952 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3]
3953 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
3954 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
3955 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3956 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3957 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
3958 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm9[1,1,1,1]
3959 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3]
3960 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
3961 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
3962 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm13[1,1,2,2]
3963 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm11[2],xmm1[3]
3964 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
3965 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %xmm9
3966 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm4[1],zero
3967 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
3968 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3969 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3970 ; AVX2-SLOW-NEXT: vmovaps 64(%r8), %xmm1
3971 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3972 ; AVX2-SLOW-NEXT: vmovaps 64(%r9), %xmm0
3973 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3974 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
3975 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
3976 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
3977 ; AVX2-SLOW-NEXT: vmovaps 64(%rax), %xmm1
3978 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3979 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3980 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
3981 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %xmm2
3982 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3983 ; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %xmm1
3984 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3985 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
3986 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
3987 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
3988 ; AVX2-SLOW-NEXT: vmovaps 64(%rcx), %xmm2
3989 ; AVX2-SLOW-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill
3990 ; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %xmm13
3991 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm13[1],xmm2[1],zero
3992 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
3993 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3994 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3995 ; AVX2-SLOW-NEXT: vmovaps 96(%r8), %xmm1
3996 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3997 ; AVX2-SLOW-NEXT: vmovaps 96(%r9), %xmm7
3998 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm7[1,1,1,1]
3999 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
4000 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
4001 ; AVX2-SLOW-NEXT: vmovaps 96(%rax), %xmm1
4002 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4003 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4004 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4005 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %xmm2
4006 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4007 ; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %xmm1
4008 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4009 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
4010 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
4011 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4012 ; AVX2-SLOW-NEXT: vmovaps 96(%rcx), %xmm3
4013 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4014 ; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %xmm2
4015 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4016 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
4017 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
4018 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4019 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4020 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
4021 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4022 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm1
4023 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4024 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
4025 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4026 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm2
4027 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4028 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %ymm1
4029 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4030 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
4031 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4032 ; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm2
4033 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4034 ; AVX2-SLOW-NEXT: vmovaps (%r9), %ymm1
4035 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4036 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4037 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4038 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4039 ; AVX2-SLOW-NEXT: vmovaps 16(%rax), %xmm2
4040 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
4041 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4042 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4043 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm0
4044 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4045 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm1
4046 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4047 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
4048 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4049 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm1
4050 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4051 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %ymm2
4052 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4053 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
4054 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4055 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %ymm2
4056 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4057 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %ymm1
4058 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4059 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4060 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4061 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4062 ; AVX2-SLOW-NEXT: vmovaps 48(%rax), %xmm2
4063 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
4064 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4065 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4066 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm1
4067 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4068 ; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %ymm0
4069 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4070 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
4071 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4072 ; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %ymm1
4073 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4074 ; AVX2-SLOW-NEXT: vmovaps 64(%rcx), %ymm2
4075 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4076 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
4077 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4078 ; AVX2-SLOW-NEXT: vmovaps 64(%r8), %ymm2
4079 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4080 ; AVX2-SLOW-NEXT: vmovaps 64(%r9), %ymm1
4081 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4082 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4083 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4084 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4085 ; AVX2-SLOW-NEXT: vmovaps 80(%rax), %xmm2
4086 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
4087 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4088 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4089 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm3
4090 ; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %ymm2
4091 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,1,1,1,5,5,5,5]
4092 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
4093 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4094 ; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %ymm4
4095 ; AVX2-SLOW-NEXT: vmovaps 96(%rcx), %ymm1
4096 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm4[1,1],ymm1[1,1],ymm4[5,5],ymm1[5,5]
4097 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm15[5,6],ymm0[7]
4098 ; AVX2-SLOW-NEXT: vbroadcastsd 112(%r8), %ymm15
4099 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0],ymm0[1,2,3,4,5,6],ymm15[7]
4100 ; AVX2-SLOW-NEXT: vbroadcastss 112(%r9), %xmm15
4101 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3,4,5,6,7]
4102 ; AVX2-SLOW-NEXT: vbroadcastss 112(%rax), %ymm15
4103 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2],ymm0[3,4,5,6,7]
4104 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4105 ; AVX2-SLOW-NEXT: vbroadcastss %xmm12, %xmm0
4106 ; AVX2-SLOW-NEXT: vbroadcastss %xmm8, %xmm15
4107 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
4108 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm15 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
4109 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,2,2]
4110 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
4111 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3],ymm15[4,5,6,7]
4112 ; AVX2-SLOW-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4113 ; AVX2-SLOW-NEXT: vmovaps %xmm14, %xmm11
4114 ; AVX2-SLOW-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4115 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
4116 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
4117 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 16-byte Folded Reload
4118 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
4119 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6],ymm0[7]
4120 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4121 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm6[3,3],xmm5[3,3]
4122 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm12[2],xmm8[3],xmm12[3]
4123 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4124 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
4125 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
4126 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6],ymm5[7]
4127 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm10[2,2,2,2]
4128 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm5 = xmm11[0,1,2],xmm5[3]
4129 ; AVX2-SLOW-NEXT: vbroadcastsd 8(%rax), %ymm6
4130 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
4131 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4],ymm0[5,6,7]
4132 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4133 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4134 ; AVX2-SLOW-NEXT: vbroadcastss %xmm8, %xmm0
4135 ; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm5
4136 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4137 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
4138 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
4139 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
4140 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
4141 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
4142 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
4143 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
4144 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
4145 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm14[0],xmm12[0],xmm14[1],xmm12[1]
4146 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
4147 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 16-byte Folded Reload
4148 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
4149 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6],ymm0[7]
4150 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4151 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm10[3,3],xmm11[3,3]
4152 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm5 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
4153 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4154 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
4155 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
4156 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6],ymm5[7]
4157 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm12[2,2,2,2]
4158 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm5 = xmm14[0,1,2],xmm5[3]
4159 ; AVX2-SLOW-NEXT: vbroadcastsd 40(%rax), %ymm6
4160 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
4161 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4],ymm0[5,6,7]
4162 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4163 ; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm12 # 16-byte Reload
4164 ; AVX2-SLOW-NEXT: vbroadcastss %xmm12, %xmm0
4165 ; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm5
4166 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4167 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
4168 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
4169 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
4170 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
4171 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
4172 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
4173 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4174 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
4175 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
4176 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
4177 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 16-byte Folded Reload
4178 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
4179 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6],ymm0[7]
4180 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4181 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm11[3,3],xmm10[3,3]
4182 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm5 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
4183 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4184 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
4185 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
4186 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6],ymm5[7]
4187 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm8[2,2,2,2]
4188 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm5 = xmm9[0,1,2],xmm5[3]
4189 ; AVX2-SLOW-NEXT: vbroadcastsd 72(%rax), %ymm6
4190 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
4191 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4],ymm0[5,6,7]
4192 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4193 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
4194 ; AVX2-SLOW-NEXT: vbroadcastss %xmm12, %xmm0
4195 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
4196 ; AVX2-SLOW-NEXT: vbroadcastss %xmm11, %xmm5
4197 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4198 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
4199 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
4200 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
4201 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
4202 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
4203 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
4204 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4205 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
4206 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
4207 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 16-byte Folded Reload
4208 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
4209 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6],ymm0[7]
4210 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4211 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm10[3,3],xmm9[3,3]
4212 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm5 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
4213 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4214 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
4215 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
4216 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6],ymm5[7]
4217 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm7[2,2,2,2]
4218 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3]
4219 ; AVX2-SLOW-NEXT: vbroadcastsd 104(%rax), %ymm6
4220 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
4221 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4],ymm0[5,6,7]
4222 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4223 ; AVX2-SLOW-NEXT: vbroadcastss 112(%rdx), %ymm0
4224 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm1[3,1,2,0,7,5,6,4]
4225 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6],ymm6[7]
4226 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm6 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
4227 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
4228 ; AVX2-SLOW-NEXT: vbroadcastss 108(%r8), %ymm6
4229 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm6[1],ymm0[2,3,4,5,6,7]
4230 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm7[2,2,3,3]
4231 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5,6,7]
4232 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm6 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
4233 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,2,2,2]
4234 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm7 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
4235 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
4236 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],mem[6,7]
4237 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm8 = mem[1,2,2,3,5,6,6,7]
4238 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,2,2,2]
4239 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm6[1,2,3,4,5,6],ymm8[7]
4240 ; AVX2-SLOW-NEXT: vmovaps 96(%rax), %ymm9
4241 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3],ymm0[4,5,6,7]
4242 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4243 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm9[2,3],ymm7[2,3]
4244 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2,3,4],ymm0[5],ymm8[6,7]
4245 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4246 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[6],ymm4[6],ymm1[7],ymm4[7]
4247 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
4248 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
4249 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
4250 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
4251 ; AVX2-SLOW-NEXT: vbroadcastss 124(%r8), %ymm1
4252 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4253 ; AVX2-SLOW-NEXT: vbroadcastss 124(%r9), %ymm1
4254 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
4255 ; AVX2-SLOW-NEXT: vbroadcastsd 120(%rax), %ymm1
4256 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
4257 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4258 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4259 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,1,1,1,5,5,5,5]
4260 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4261 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
4262 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
4263 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4264 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
4265 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm10[1,1],ymm5[1,1],ymm10[5,5],ymm5[5,5]
4266 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6],ymm1[7]
4267 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
4268 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm13[0,0,0,0,4,4,4,4]
4269 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4270 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm11[0,1,0,1,4,5,4,5]
4271 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
4272 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
4273 ; AVX2-SLOW-NEXT: vbroadcastsd 16(%rax), %ymm3
4274 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
4275 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
4276 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4277 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4278 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm14[1,1,1,1,5,5,5,5]
4279 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4280 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3,4],ymm12[5],ymm2[6,7]
4281 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
4282 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
4283 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4284 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm0[1,1],ymm9[1,1],ymm0[5,5],ymm9[5,5]
4285 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6],ymm2[7]
4286 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
4287 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,0,0,0,4,4,4,4]
4288 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
4289 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,0,1,4,5,4,5]
4290 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
4291 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3]
4292 ; AVX2-SLOW-NEXT: vbroadcastsd 48(%rax), %ymm4
4293 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
4294 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm2[3,4,5,6],ymm3[7]
4295 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4296 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
4297 ; AVX2-SLOW-NEXT: # ymm3 = mem[1,1,1,1,5,5,5,5]
4298 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
4299 ; AVX2-SLOW-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
4300 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,2]
4301 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4302 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4303 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm0[1,1],ymm15[1,1],ymm0[5,5],ymm15[5,5]
4304 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6],ymm3[7]
4305 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
4306 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,0,0,0,4,4,4,4]
4307 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
4308 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,1,0,1,4,5,4,5]
4309 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0],ymm4[1],ymm7[2,3,4],ymm4[5],ymm7[6,7]
4310 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,2,3]
4311 ; AVX2-SLOW-NEXT: vbroadcastsd 80(%rax), %ymm7
4312 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3],ymm4[4,5,6,7]
4313 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2],ymm3[3,4,5,6],ymm4[7]
4314 ; AVX2-SLOW-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
4315 ; AVX2-SLOW-NEXT: vbroadcastss 16(%rdx), %ymm4
4316 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm5[3,1,2,0,7,5,6,4]
4317 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6],ymm7[7]
4318 ; AVX2-SLOW-NEXT: vmovaps %ymm6, %ymm1
4319 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm7 = ymm6[0],ymm8[0],ymm6[1],ymm8[1],ymm6[4],ymm8[4],ymm6[5],ymm8[5]
4320 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm7[4,5],ymm4[6,7]
4321 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
4322 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload
4323 ; AVX2-SLOW-NEXT: # xmm7 = xmm3[3,3],mem[3,3]
4324 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
4325 ; AVX2-SLOW-NEXT: # xmm7 = xmm7[0,1,2],mem[3]
4326 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm4[0],ymm7[1,2,3],ymm4[4,5,6,7]
4327 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm4 = ymm5[2],ymm10[2],ymm5[3],ymm10[3],ymm5[6],ymm10[6],ymm5[7],ymm10[7]
4328 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm7 = ymm8[2],ymm1[2],ymm8[3],ymm1[3],ymm8[6],ymm1[6],ymm8[7],ymm1[7]
4329 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[3,3,3,3]
4330 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
4331 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2],ymm4[3,4,5,6,7]
4332 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm11[3,3],ymm13[3,3],ymm11[7,7],ymm13[7,7]
4333 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm10 = mem[2,3,2,3,6,7,6,7]
4334 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0],ymm7[1,2],ymm10[3,4],ymm7[5,6],ymm10[7]
4335 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3]
4336 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm7[0],ymm4[1,2,3,4],ymm7[5,6,7]
4337 ; AVX2-SLOW-NEXT: vbroadcastss 48(%rdx), %ymm4
4338 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm9[3,1,2,0,7,5,6,4]
4339 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6],ymm7[7]
4340 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm7 = ymm12[0],ymm14[0],ymm12[1],ymm14[1],ymm12[4],ymm14[4],ymm12[5],ymm14[5]
4341 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm7[4,5],ymm4[6,7]
4342 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
4343 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload
4344 ; AVX2-SLOW-NEXT: # xmm7 = xmm3[3,3],mem[3,3]
4345 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
4346 ; AVX2-SLOW-NEXT: # xmm7 = xmm7[0,1,2],mem[3]
4347 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm7[1,2,3],ymm4[4,5,6,7]
4348 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm4 # 32-byte Folded Reload
4349 ; AVX2-SLOW-NEXT: # ymm4 = ymm9[2],mem[2],ymm9[3],mem[3],ymm9[6],mem[6],ymm9[7],mem[7]
4350 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm7 = ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[6],ymm12[6],ymm14[7],ymm12[7]
4351 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[3,3,3,3]
4352 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
4353 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2],ymm4[3,4,5,6,7]
4354 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4355 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
4356 ; AVX2-SLOW-NEXT: # ymm7 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
4357 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm12 = mem[2,3,2,3,6,7,6,7]
4358 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm12[0],ymm7[1,2],ymm12[3,4],ymm7[5,6],ymm12[7]
4359 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3]
4360 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0],ymm4[1,2,3,4],ymm7[5,6,7]
4361 ; AVX2-SLOW-NEXT: vbroadcastss 80(%rdx), %ymm7
4362 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm12 = ymm15[3,1,2,0,7,5,6,4]
4363 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm12[0,1,2,3,4,5],ymm7[6],ymm12[7]
4364 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4365 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4366 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm12 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
4367 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm12[4,5],ymm7[6,7]
4368 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4369 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm12 # 16-byte Folded Reload
4370 ; AVX2-SLOW-NEXT: # xmm12 = xmm5[3,3],mem[3,3]
4371 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
4372 ; AVX2-SLOW-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
4373 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm12[1,2,3],ymm7[4,5,6,7]
4374 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm12 # 32-byte Folded Reload
4375 ; AVX2-SLOW-NEXT: # ymm12 = ymm15[2],mem[2],ymm15[3],mem[3],ymm15[6],mem[6],ymm15[7],mem[7]
4376 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm13 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
4377 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[3,3,3,3]
4378 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[3,3,3,3]
4379 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
4380 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4381 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
4382 ; AVX2-SLOW-NEXT: # ymm13 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
4383 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm15 = mem[2,3,2,3,6,7,6,7]
4384 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0],ymm13[1,2],ymm15[3,4],ymm13[5,6],ymm15[7]
4385 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,2,3]
4386 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0],ymm12[1,2,3,4],ymm13[5,6,7]
4387 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
4388 ; AVX2-SLOW-NEXT: vmovaps %ymm12, 640(%rax)
4389 ; AVX2-SLOW-NEXT: vmovaps %ymm7, 544(%rax)
4390 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 416(%rax)
4391 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 320(%rax)
4392 ; AVX2-SLOW-NEXT: vmovaps %ymm10, 192(%rax)
4393 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 96(%rax)
4394 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
4395 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 608(%rax)
4396 ; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
4397 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 576(%rax)
4398 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4399 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 384(%rax)
4400 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4401 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 352(%rax)
4402 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4403 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 160(%rax)
4404 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4405 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rax)
4406 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4407 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 864(%rax)
4408 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4409 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 832(%rax)
4410 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4411 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 768(%rax)
4412 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4413 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 736(%rax)
4414 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4415 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 704(%rax)
4416 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4417 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 672(%rax)
4418 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4419 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 512(%rax)
4420 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4421 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 480(%rax)
4422 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4423 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 448(%rax)
4424 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4425 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 288(%rax)
4426 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4427 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rax)
4428 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4429 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax)
4430 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4431 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rax)
4432 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4433 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
4434 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4435 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
4436 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4437 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 800(%rax)
4438 ; AVX2-SLOW-NEXT: addq $1320, %rsp # imm = 0x528
4439 ; AVX2-SLOW-NEXT: vzeroupper
4440 ; AVX2-SLOW-NEXT: retq
4441 ;
4442 ; AVX2-FAST-LABEL: store_i32_stride7_vf32:
4443 ; AVX2-FAST: # %bb.0:
4444 ; AVX2-FAST-NEXT: subq $1416, %rsp # imm = 0x588
4445 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
4446 ; AVX2-FAST-NEXT: vmovaps (%rax), %xmm0
4447 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4448 ; AVX2-FAST-NEXT: vmovaps 32(%rax), %xmm3
4449 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4450 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4451 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm2
4452 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4453 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %xmm4
4454 ; AVX2-FAST-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4455 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm1
4456 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4457 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %xmm5
4458 ; AVX2-FAST-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4459 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
4460 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
4461 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
4462 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
4463 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm2
4464 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4465 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %xmm6
4466 ; AVX2-FAST-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4467 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm1
4468 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4469 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1],xmm2[1],zero
4470 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm9
4471 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %xmm7
4472 ; AVX2-FAST-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4473 ; AVX2-FAST-NEXT: vmovaps (%rsi), %xmm11
4474 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %xmm8
4475 ; AVX2-FAST-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4476 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm11[1,1,2,2]
4477 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm9[2],xmm2[3]
4478 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
4479 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
4480 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4481 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4482 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
4483 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1,1,1]
4484 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
4485 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
4486 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
4487 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm8[1,1,2,2]
4488 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2],xmm1[3]
4489 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4490 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %xmm2
4491 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4492 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm6[1],zero
4493 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
4494 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4495 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4496 ; AVX2-FAST-NEXT: vmovaps 64(%r8), %xmm1
4497 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4498 ; AVX2-FAST-NEXT: vmovaps 64(%r9), %xmm0
4499 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4500 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
4501 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
4502 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
4503 ; AVX2-FAST-NEXT: vmovaps 64(%rax), %xmm1
4504 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4505 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4506 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4507 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %xmm2
4508 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4509 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %xmm1
4510 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4511 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
4512 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
4513 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4514 ; AVX2-FAST-NEXT: vmovaps 64(%rcx), %xmm3
4515 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4516 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %xmm2
4517 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4518 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
4519 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
4520 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4521 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4522 ; AVX2-FAST-NEXT: vmovaps 96(%r8), %xmm1
4523 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4524 ; AVX2-FAST-NEXT: vmovaps 96(%r9), %xmm2
4525 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm2[1,1,1,1]
4526 ; AVX2-FAST-NEXT: vmovaps %xmm2, %xmm7
4527 ; AVX2-FAST-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill
4528 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
4529 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
4530 ; AVX2-FAST-NEXT: vmovaps 96(%rax), %xmm1
4531 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4532 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4533 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4534 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %xmm2
4535 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4536 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %xmm1
4537 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4538 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
4539 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
4540 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4541 ; AVX2-FAST-NEXT: vmovaps 96(%rcx), %xmm3
4542 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4543 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %xmm2
4544 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4545 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
4546 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
4547 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4548 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4549 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
4550 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4551 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm13
4552 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm13[2],ymm0[3],ymm13[3],ymm0[6],ymm13[6],ymm0[7],ymm13[7]
4553 ; AVX2-FAST-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4554 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4555 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm6
4556 ; AVX2-FAST-NEXT: vmovaps (%rcx), %ymm10
4557 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm10[2],ymm6[3],ymm10[3],ymm6[6],ymm10[6],ymm6[7],ymm10[7]
4558 ; AVX2-FAST-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4559 ; AVX2-FAST-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4560 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4561 ; AVX2-FAST-NEXT: vmovaps (%r8), %ymm2
4562 ; AVX2-FAST-NEXT: vmovaps (%r9), %ymm3
4563 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm3[1,1,2,2,5,5,6,6]
4564 ; AVX2-FAST-NEXT: vmovaps %ymm3, %ymm14
4565 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4566 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4567 ; AVX2-FAST-NEXT: vmovaps %ymm2, %ymm8
4568 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4569 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4570 ; AVX2-FAST-NEXT: vmovaps 16(%rax), %xmm2
4571 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
4572 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4573 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4574 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm1
4575 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4576 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %ymm0
4577 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4578 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
4579 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4580 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm1
4581 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4582 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %ymm2
4583 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4584 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
4585 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4586 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %ymm2
4587 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4588 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %ymm1
4589 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4590 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4591 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4592 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4593 ; AVX2-FAST-NEXT: vmovaps 48(%rax), %xmm2
4594 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
4595 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4596 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4597 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm0
4598 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4599 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %ymm1
4600 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4601 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
4602 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4603 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %ymm2
4604 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4605 ; AVX2-FAST-NEXT: vmovaps 64(%rcx), %ymm1
4606 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4607 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
4608 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4609 ; AVX2-FAST-NEXT: vmovaps 64(%r8), %ymm2
4610 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4611 ; AVX2-FAST-NEXT: vmovaps 64(%r9), %ymm1
4612 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4613 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4614 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4615 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4616 ; AVX2-FAST-NEXT: vmovaps 80(%rax), %xmm2
4617 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
4618 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4619 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4620 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm4
4621 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %ymm2
4622 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,1,1,1,5,5,5,5]
4623 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2,3,4],ymm4[5],ymm0[6,7]
4624 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[2,2,2,2]
4625 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %ymm5
4626 ; AVX2-FAST-NEXT: vmovaps 96(%rcx), %ymm1
4627 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm5[1,1],ymm1[1,1],ymm5[5,5],ymm1[5,5]
4628 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm15[5,6],ymm3[7]
4629 ; AVX2-FAST-NEXT: vbroadcastsd 112(%r8), %ymm15
4630 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm15[0],ymm3[1,2,3,4,5,6],ymm15[7]
4631 ; AVX2-FAST-NEXT: vbroadcastss 112(%r9), %xmm15
4632 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm15[1],ymm3[2,3,4,5,6,7]
4633 ; AVX2-FAST-NEXT: vbroadcastss 112(%rax), %ymm15
4634 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm15[2],ymm3[3,4,5,6,7]
4635 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4636 ; AVX2-FAST-NEXT: vbroadcastss 112(%rdx), %ymm3
4637 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm1[3,1,2,0,7,5,6,4]
4638 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm15[0,1,2,3,4,5],ymm3[6],ymm15[7]
4639 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm15 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[4],ymm2[4],ymm4[5],ymm2[5]
4640 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm15[4,5],ymm3[6,7]
4641 ; AVX2-FAST-NEXT: vbroadcastss 108(%r8), %ymm15
4642 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm15[1],ymm3[2,3,4,5,6,7]
4643 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm15 = xmm7[2,2,3,3]
4644 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm15[2,3],ymm3[4,5,6,7]
4645 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm15 = ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[6],ymm2[6],ymm4[7],ymm2[7]
4646 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,2]
4647 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm5[2],ymm1[2],ymm5[3],ymm1[3],ymm5[6],ymm1[6],ymm5[7],ymm1[7]
4648 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4649 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3,4,5],mem[6,7]
4650 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm12 = [5,6,5,6,5,6,5,6]
4651 ; AVX2-FAST-NEXT: vpermps 96(%r9), %ymm12, %ymm12
4652 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0],ymm15[1,2,3,4,5,6],ymm12[7]
4653 ; AVX2-FAST-NEXT: vmovaps 96(%rax), %ymm15
4654 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm15[3],ymm3[4,5,6,7]
4655 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4656 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
4657 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0],ymm0[1],ymm12[2,3,4],ymm0[5],ymm12[6,7]
4658 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4659 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm5[2],ymm1[3],ymm5[3],ymm1[6],ymm5[6],ymm1[7],ymm5[7]
4660 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
4661 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
4662 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
4663 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
4664 ; AVX2-FAST-NEXT: vbroadcastss 124(%r8), %ymm1
4665 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4666 ; AVX2-FAST-NEXT: vbroadcastss 124(%r9), %ymm1
4667 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
4668 ; AVX2-FAST-NEXT: vbroadcastsd 120(%rax), %ymm1
4669 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
4670 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4671 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
4672 ; AVX2-FAST-NEXT: vbroadcastss %xmm12, %xmm0
4673 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
4674 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm1
4675 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4676 ; AVX2-FAST-NEXT: vmovaps %xmm11, %xmm15
4677 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm9[0],xmm11[0],xmm9[1],xmm11[1]
4678 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm11 = [0,1,2,2,0,1,2,2]
4679 ; AVX2-FAST-NEXT: # ymm11 = mem[0,1,0,1]
4680 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm11, %ymm2
4681 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
4682 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4683 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4684 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
4685 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
4686 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 16-byte Folded Reload
4687 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
4688 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6],ymm1[7]
4689 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4690 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm9[3,3],xmm15[3,3]
4691 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm2 = xmm7[2],xmm12[2],xmm7[3],xmm12[3]
4692 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4693 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm11, %ymm2
4694 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
4695 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm5[2,2,2,2]
4696 ; AVX2-FAST-NEXT: vmovaps %xmm5, %xmm9
4697 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[3]
4698 ; AVX2-FAST-NEXT: vbroadcastsd 8(%rax), %ymm3
4699 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
4700 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2,3,4],ymm1[5,6,7]
4701 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4702 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm13[1,1,1,1,5,5,5,5]
4703 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
4704 ; AVX2-FAST-NEXT: # ymm1 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
4705 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
4706 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm6[1,1],ymm10[1,1],ymm6[5,5],ymm10[5,5]
4707 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6],ymm1[7]
4708 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm14[0,0,0,0,4,4,4,4]
4709 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm8[0,1,0,1,4,5,4,5]
4710 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
4711 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
4712 ; AVX2-FAST-NEXT: vbroadcastsd 16(%rax), %ymm3
4713 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
4714 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
4715 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4716 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4717 ; AVX2-FAST-NEXT: vbroadcastss %xmm0, %xmm1
4718 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4719 ; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm2
4720 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4721 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4722 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4723 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
4724 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm11, %ymm2
4725 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
4726 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4727 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
4728 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
4729 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
4730 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 16-byte Folded Reload
4731 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
4732 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6],ymm1[7]
4733 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4734 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[3,3],xmm5[3,3]
4735 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm2 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4736 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4737 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm11, %ymm2
4738 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
4739 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm7[2,2,2,2]
4740 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm8[0,1,2],xmm2[3]
4741 ; AVX2-FAST-NEXT: vbroadcastsd 40(%rax), %ymm3
4742 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
4743 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2,3,4],ymm1[5,6,7]
4744 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4745 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
4746 ; AVX2-FAST-NEXT: # ymm1 = mem[1,1,1,1,5,5,5,5]
4747 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
4748 ; AVX2-FAST-NEXT: # ymm1 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
4749 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
4750 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4751 ; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
4752 ; AVX2-FAST-NEXT: # ymm2 = ymm0[1,1],mem[1,1],ymm0[5,5],mem[5,5]
4753 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6],ymm1[7]
4754 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
4755 ; AVX2-FAST-NEXT: # ymm2 = mem[0,0,0,0,4,4,4,4]
4756 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
4757 ; AVX2-FAST-NEXT: # ymm3 = mem[0,1,0,1,4,5,4,5]
4758 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
4759 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
4760 ; AVX2-FAST-NEXT: vbroadcastsd 48(%rax), %ymm3
4761 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
4762 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
4763 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4764 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4765 ; AVX2-FAST-NEXT: vbroadcastss %xmm8, %xmm1
4766 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
4767 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm2
4768 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4769 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4770 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4771 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
4772 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm11, %ymm2
4773 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
4774 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4775 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4776 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
4777 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
4778 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 16-byte Folded Reload
4779 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
4780 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6],ymm1[7]
4781 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4782 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[3,3],xmm0[3,3]
4783 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm2 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
4784 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4785 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm11, %ymm2
4786 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
4787 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm6[2,2,2,2]
4788 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3]
4789 ; AVX2-FAST-NEXT: vbroadcastsd 72(%rax), %ymm3
4790 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
4791 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2,3,4],ymm1[5,6,7]
4792 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4793 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4794 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,1,1,1,5,5,5,5]
4795 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4796 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
4797 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
4798 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
4799 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4800 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm13[1,1],ymm10[1,1],ymm13[5,5],ymm10[5,5]
4801 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6],ymm1[7]
4802 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
4803 ; AVX2-FAST-NEXT: # ymm2 = mem[0,0,0,0,4,4,4,4]
4804 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4805 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm15[0,1,0,1,4,5,4,5]
4806 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
4807 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
4808 ; AVX2-FAST-NEXT: vbroadcastsd 80(%rax), %ymm3
4809 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
4810 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
4811 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4812 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
4813 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm1
4814 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4815 ; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm3
4816 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
4817 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4818 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4819 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
4820 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm11, %ymm3
4821 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
4822 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4823 ; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm2 # 16-byte Reload
4824 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm3 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
4825 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
4826 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 16-byte Folded Reload
4827 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
4828 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm3[4,5,6],ymm8[7]
4829 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4830 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3],xmm1[3,3]
4831 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm4 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
4832 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm11, %ymm4
4833 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
4834 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm4[0,1,2,3,4],ymm3[5,6],ymm4[7]
4835 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm2[2,2,2,2]
4836 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[3]
4837 ; AVX2-FAST-NEXT: vbroadcastsd 104(%rax), %ymm4
4838 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
4839 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm3[2,3,4],ymm7[5,6,7]
4840 ; AVX2-FAST-NEXT: vbroadcastss 16(%rdx), %ymm3
4841 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4842 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm0[3,1,2,0,7,5,6,4]
4843 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6],ymm4[7]
4844 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4845 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
4846 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm4 = ymm1[0],ymm7[0],ymm1[1],ymm7[1],ymm1[4],ymm7[4],ymm1[5],ymm7[5]
4847 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
4848 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4849 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm4 = xmm2[3,3],xmm9[3,3]
4850 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
4851 ; AVX2-FAST-NEXT: # xmm4 = xmm4[0,1,2],mem[3]
4852 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm4[1,2,3],ymm3[4,5,6,7]
4853 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
4854 ; AVX2-FAST-NEXT: # ymm3 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
4855 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm4 = ymm7[2],ymm1[2],ymm7[3],ymm1[3],ymm7[6],ymm1[6],ymm7[7],ymm1[7]
4856 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[3,3,3,3]
4857 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[3,3,3,3]
4858 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7]
4859 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4860 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
4861 ; AVX2-FAST-NEXT: # ymm4 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
4862 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm7 = mem[2,3,2,3,6,7,6,7]
4863 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0],ymm4[1,2],ymm7[3,4],ymm4[5,6],ymm7[7]
4864 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,2,3]
4865 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4],ymm4[5,6,7]
4866 ; AVX2-FAST-NEXT: vbroadcastss 48(%rdx), %ymm4
4867 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4868 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm7 = ymm0[3,1,2,0,7,5,6,4]
4869 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6],ymm7[7]
4870 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4871 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
4872 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm7 = ymm9[0],ymm1[0],ymm9[1],ymm1[1],ymm9[4],ymm1[4],ymm9[5],ymm1[5]
4873 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm7[4,5],ymm4[6,7]
4874 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4875 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm7 # 16-byte Folded Reload
4876 ; AVX2-FAST-NEXT: # xmm7 = xmm5[3,3],mem[3,3]
4877 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
4878 ; AVX2-FAST-NEXT: # xmm7 = xmm7[0,1,2],mem[3]
4879 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm7[1,2,3],ymm4[4,5,6,7]
4880 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
4881 ; AVX2-FAST-NEXT: # ymm7 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
4882 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm8 = ymm1[2],ymm9[2],ymm1[3],ymm9[3],ymm1[6],ymm9[6],ymm1[7],ymm9[7]
4883 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
4884 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[3,3,3,3]
4885 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7]
4886 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4887 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
4888 ; AVX2-FAST-NEXT: # ymm8 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
4889 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm9 = mem[2,3,2,3,6,7,6,7]
4890 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1,2],ymm9[3,4],ymm8[5,6],ymm9[7]
4891 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,2,3]
4892 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1,2,3,4],ymm8[5,6,7]
4893 ; AVX2-FAST-NEXT: vbroadcastss 80(%rdx), %ymm8
4894 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm10[3,1,2,0,7,5,6,4]
4895 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6],ymm9[7]
4896 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm9 = ymm14[0],ymm12[0],ymm14[1],ymm12[1],ymm14[4],ymm12[4],ymm14[5],ymm12[5]
4897 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
4898 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4899 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
4900 ; AVX2-FAST-NEXT: # xmm9 = xmm0[3,3],mem[3,3]
4901 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
4902 ; AVX2-FAST-NEXT: # xmm9 = xmm9[0,1,2],mem[3]
4903 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm9[1,2,3],ymm8[4,5,6,7]
4904 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm9 = ymm10[2],ymm13[2],ymm10[3],ymm13[3],ymm10[6],ymm13[6],ymm10[7],ymm13[7]
4905 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm10 = ymm12[2],ymm14[2],ymm12[3],ymm14[3],ymm12[6],ymm14[6],ymm12[7],ymm14[7]
4906 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[3,3,3,3]
4907 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[3,3,3,3]
4908 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7]
4909 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm10 # 32-byte Folded Reload
4910 ; AVX2-FAST-NEXT: # ymm10 = ymm15[3,3],mem[3,3],ymm15[7,7],mem[7,7]
4911 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm11 = mem[2,3,2,3,6,7,6,7]
4912 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1,2],ymm11[3,4],ymm10[5,6],ymm11[7]
4913 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,1,2,3]
4914 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1,2,3,4],ymm10[5,6,7]
4915 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
4916 ; AVX2-FAST-NEXT: vmovaps %ymm9, 640(%rax)
4917 ; AVX2-FAST-NEXT: vmovaps %ymm8, 544(%rax)
4918 ; AVX2-FAST-NEXT: vmovaps %ymm7, 416(%rax)
4919 ; AVX2-FAST-NEXT: vmovaps %ymm4, 320(%rax)
4920 ; AVX2-FAST-NEXT: vmovaps %ymm3, 192(%rax)
4921 ; AVX2-FAST-NEXT: vmovaps %ymm2, 96(%rax)
4922 ; AVX2-FAST-NEXT: vmovaps %ymm6, 736(%rax)
4923 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4924 ; AVX2-FAST-NEXT: vmovaps %ymm0, 672(%rax)
4925 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4926 ; AVX2-FAST-NEXT: vmovaps %ymm0, 608(%rax)
4927 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4928 ; AVX2-FAST-NEXT: vmovaps %ymm0, 576(%rax)
4929 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4930 ; AVX2-FAST-NEXT: vmovaps %ymm0, 512(%rax)
4931 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4932 ; AVX2-FAST-NEXT: vmovaps %ymm0, 448(%rax)
4933 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4934 ; AVX2-FAST-NEXT: vmovaps %ymm0, 384(%rax)
4935 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4936 ; AVX2-FAST-NEXT: vmovaps %ymm0, 352(%rax)
4937 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4938 ; AVX2-FAST-NEXT: vmovaps %ymm0, 288(%rax)
4939 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4940 ; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rax)
4941 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4942 ; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rax)
4943 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4944 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rax)
4945 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4946 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
4947 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4948 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
4949 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4950 ; AVX2-FAST-NEXT: vmovaps %ymm0, 864(%rax)
4951 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4952 ; AVX2-FAST-NEXT: vmovaps %ymm0, 832(%rax)
4953 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4954 ; AVX2-FAST-NEXT: vmovaps %ymm0, 768(%rax)
4955 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4956 ; AVX2-FAST-NEXT: vmovaps %ymm0, 704(%rax)
4957 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4958 ; AVX2-FAST-NEXT: vmovaps %ymm0, 480(%rax)
4959 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4960 ; AVX2-FAST-NEXT: vmovaps %ymm0, 256(%rax)
4961 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4962 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
4963 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4964 ; AVX2-FAST-NEXT: vmovaps %ymm0, 800(%rax)
4965 ; AVX2-FAST-NEXT: addq $1416, %rsp # imm = 0x588
4966 ; AVX2-FAST-NEXT: vzeroupper
4967 ; AVX2-FAST-NEXT: retq
4968 ;
4969 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf32:
4970 ; AVX2-FAST-PERLANE: # %bb.0:
4971 ; AVX2-FAST-PERLANE-NEXT: subq $1320, %rsp # imm = 0x528
4972 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
4973 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rax), %xmm0
4974 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4975 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rax), %xmm3
4976 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4977 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4978 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm14
4979 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %xmm7
4980 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4981 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm10
4982 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %xmm9
4983 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4984 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm10[1,1,1,1]
4985 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
4986 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
4987 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
4988 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm12
4989 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %xmm4
4990 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4991 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm8
4992 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm8[1],xmm12[1],zero
4993 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm6
4994 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %xmm11
4995 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4996 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %xmm5
4997 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %xmm13
4998 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4999 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm5[1,1,2,2]
5000 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3]
5001 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
5002 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
5003 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
5004 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5005 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
5006 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm9[1,1,1,1]
5007 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3]
5008 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
5009 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
5010 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm13[1,1,2,2]
5011 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm11[2],xmm1[3]
5012 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
5013 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %xmm9
5014 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm4[1],zero
5015 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
5016 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
5017 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5018 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r8), %xmm1
5019 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5020 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r9), %xmm0
5021 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5022 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
5023 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
5024 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
5025 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rax), %xmm1
5026 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5027 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5028 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5029 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %xmm2
5030 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5031 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %xmm1
5032 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5033 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
5034 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
5035 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
5036 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rcx), %xmm2
5037 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill
5038 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %xmm13
5039 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm13[1],xmm2[1],zero
5040 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
5041 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
5042 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5043 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r8), %xmm1
5044 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5045 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r9), %xmm7
5046 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm7[1,1,1,1]
5047 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
5048 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
5049 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rax), %xmm1
5050 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5051 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5052 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5053 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %xmm2
5054 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5055 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %xmm1
5056 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5057 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
5058 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
5059 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
5060 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rcx), %xmm3
5061 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5062 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %xmm2
5063 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5064 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
5065 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
5066 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
5067 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5068 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
5069 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5070 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm1
5071 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5072 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
5073 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
5074 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm2
5075 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5076 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %ymm1
5077 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5078 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
5079 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
5080 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm2
5081 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5082 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %ymm1
5083 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5084 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
5085 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
5086 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
5087 ; AVX2-FAST-PERLANE-NEXT: vmovaps 16(%rax), %xmm2
5088 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
5089 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5090 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5091 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm0
5092 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5093 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm1
5094 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5095 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
5096 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
5097 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm1
5098 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5099 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %ymm2
5100 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5101 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
5102 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
5103 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %ymm2
5104 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5105 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %ymm1
5106 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5107 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
5108 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
5109 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
5110 ; AVX2-FAST-PERLANE-NEXT: vmovaps 48(%rax), %xmm2
5111 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
5112 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5113 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5114 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm1
5115 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5116 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %ymm0
5117 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5118 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
5119 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
5120 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %ymm1
5121 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5122 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rcx), %ymm2
5123 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5124 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
5125 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
5126 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r8), %ymm2
5127 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5128 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r9), %ymm1
5129 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5130 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
5131 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
5132 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
5133 ; AVX2-FAST-PERLANE-NEXT: vmovaps 80(%rax), %xmm2
5134 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
5135 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5136 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5137 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm3
5138 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %ymm2
5139 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,1,1,1,5,5,5,5]
5140 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
5141 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
5142 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %ymm4
5143 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rcx), %ymm1
5144 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm4[1,1],ymm1[1,1],ymm4[5,5],ymm1[5,5]
5145 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm15[5,6],ymm0[7]
5146 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 112(%r8), %ymm15
5147 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0],ymm0[1,2,3,4,5,6],ymm15[7]
5148 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 112(%r9), %xmm15
5149 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3,4,5,6,7]
5150 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 112(%rax), %ymm15
5151 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2],ymm0[3,4,5,6,7]
5152 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5153 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm12, %xmm0
5154 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm8, %xmm15
5155 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
5156 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm15 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
5157 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,2,2]
5158 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
5159 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3],ymm15[4,5,6,7]
5160 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5161 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm14, %xmm11
5162 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5163 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
5164 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
5165 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 16-byte Folded Reload
5166 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
5167 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6],ymm0[7]
5168 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5169 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm6[3,3],xmm5[3,3]
5170 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm12[2],xmm8[3],xmm12[3]
5171 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5172 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
5173 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
5174 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6],ymm5[7]
5175 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm10[2,2,2,2]
5176 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm5 = xmm11[0,1,2],xmm5[3]
5177 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 8(%rax), %ymm6
5178 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
5179 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4],ymm0[5,6,7]
5180 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5181 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5182 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm8, %xmm0
5183 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm5
5184 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
5185 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5186 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
5187 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
5188 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
5189 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
5190 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
5191 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5192 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5193 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm14[0],xmm12[0],xmm14[1],xmm12[1]
5194 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
5195 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 16-byte Folded Reload
5196 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
5197 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6],ymm0[7]
5198 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5199 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm10[3,3],xmm11[3,3]
5200 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm5 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
5201 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5202 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
5203 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
5204 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6],ymm5[7]
5205 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm12[2,2,2,2]
5206 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm5 = xmm14[0,1,2],xmm5[3]
5207 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 40(%rax), %ymm6
5208 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
5209 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4],ymm0[5,6,7]
5210 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5211 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsp), %xmm12 # 16-byte Reload
5212 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm12, %xmm0
5213 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm5
5214 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
5215 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5216 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
5217 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
5218 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
5219 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
5220 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
5221 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5222 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
5223 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
5224 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
5225 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 16-byte Folded Reload
5226 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
5227 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6],ymm0[7]
5228 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5229 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm11[3,3],xmm10[3,3]
5230 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm5 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
5231 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5232 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
5233 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
5234 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6],ymm5[7]
5235 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm8[2,2,2,2]
5236 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm5 = xmm9[0,1,2],xmm5[3]
5237 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 72(%rax), %ymm6
5238 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
5239 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4],ymm0[5,6,7]
5240 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5241 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5242 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm12, %xmm0
5243 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
5244 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm11, %xmm5
5245 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
5246 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
5247 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5248 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
5249 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
5250 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
5251 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
5252 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5253 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
5254 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
5255 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 16-byte Folded Reload
5256 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
5257 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6],ymm0[7]
5258 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5259 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm10[3,3],xmm9[3,3]
5260 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm5 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
5261 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5262 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1,2,2]
5263 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
5264 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6],ymm5[7]
5265 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm7[2,2,2,2]
5266 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3]
5267 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 104(%rax), %ymm6
5268 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
5269 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4],ymm0[5,6,7]
5270 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5271 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 112(%rdx), %ymm0
5272 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm1[3,1,2,0,7,5,6,4]
5273 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6],ymm6[7]
5274 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm6 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
5275 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
5276 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 108(%r8), %ymm6
5277 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm6[1],ymm0[2,3,4,5,6,7]
5278 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm7[2,2,3,3]
5279 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5,6,7]
5280 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm6 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
5281 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,2,2,2]
5282 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm7 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
5283 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
5284 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],mem[6,7]
5285 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm8 = mem[1,2,2,3,5,6,6,7]
5286 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,2,2,2]
5287 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm6[1,2,3,4,5,6],ymm8[7]
5288 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rax), %ymm9
5289 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3],ymm0[4,5,6,7]
5290 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5291 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm9[2,3],ymm7[2,3]
5292 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2,3,4],ymm0[5],ymm8[6,7]
5293 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5294 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[6],ymm4[6],ymm1[7],ymm4[7]
5295 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
5296 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
5297 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
5298 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
5299 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 124(%r8), %ymm1
5300 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5301 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 124(%r9), %ymm1
5302 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
5303 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 120(%rax), %ymm1
5304 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
5305 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5306 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
5307 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,1,1,1,5,5,5,5]
5308 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5309 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
5310 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
5311 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
5312 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
5313 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm10[1,1],ymm5[1,1],ymm10[5,5],ymm5[5,5]
5314 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6],ymm1[7]
5315 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
5316 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm13[0,0,0,0,4,4,4,4]
5317 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
5318 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm11[0,1,0,1,4,5,4,5]
5319 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
5320 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
5321 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 16(%rax), %ymm3
5322 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
5323 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
5324 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5325 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
5326 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm14[1,1,1,1,5,5,5,5]
5327 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
5328 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3,4],ymm12[5],ymm2[6,7]
5329 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
5330 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
5331 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5332 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm0[1,1],ymm9[1,1],ymm0[5,5],ymm9[5,5]
5333 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6],ymm2[7]
5334 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
5335 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,0,0,0,4,4,4,4]
5336 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
5337 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,1,0,1,4,5,4,5]
5338 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
5339 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3]
5340 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 48(%rax), %ymm4
5341 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
5342 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm2[3,4,5,6],ymm3[7]
5343 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5344 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
5345 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[1,1,1,1,5,5,5,5]
5346 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
5347 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
5348 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,2]
5349 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5350 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
5351 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm0[1,1],ymm15[1,1],ymm0[5,5],ymm15[5,5]
5352 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6],ymm3[7]
5353 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
5354 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,0,0,0,4,4,4,4]
5355 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
5356 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,1,0,1,4,5,4,5]
5357 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0],ymm4[1],ymm7[2,3,4],ymm4[5],ymm7[6,7]
5358 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,2,3]
5359 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 80(%rax), %ymm7
5360 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3],ymm4[4,5,6,7]
5361 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2],ymm3[3,4,5,6],ymm4[7]
5362 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
5363 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%rdx), %ymm4
5364 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm5[3,1,2,0,7,5,6,4]
5365 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6],ymm7[7]
5366 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, %ymm1
5367 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm7 = ymm6[0],ymm8[0],ymm6[1],ymm8[1],ymm6[4],ymm8[4],ymm6[5],ymm8[5]
5368 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm7[4,5],ymm4[6,7]
5369 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
5370 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload
5371 ; AVX2-FAST-PERLANE-NEXT: # xmm7 = xmm3[3,3],mem[3,3]
5372 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
5373 ; AVX2-FAST-PERLANE-NEXT: # xmm7 = xmm7[0,1,2],mem[3]
5374 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm4[0],ymm7[1,2,3],ymm4[4,5,6,7]
5375 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm4 = ymm5[2],ymm10[2],ymm5[3],ymm10[3],ymm5[6],ymm10[6],ymm5[7],ymm10[7]
5376 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm7 = ymm8[2],ymm1[2],ymm8[3],ymm1[3],ymm8[6],ymm1[6],ymm8[7],ymm1[7]
5377 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[3,3,3,3]
5378 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
5379 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2],ymm4[3,4,5,6,7]
5380 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm11[3,3],ymm13[3,3],ymm11[7,7],ymm13[7,7]
5381 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm10 = mem[2,3,2,3,6,7,6,7]
5382 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0],ymm7[1,2],ymm10[3,4],ymm7[5,6],ymm10[7]
5383 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3]
5384 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm7[0],ymm4[1,2,3,4],ymm7[5,6,7]
5385 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 48(%rdx), %ymm4
5386 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm9[3,1,2,0,7,5,6,4]
5387 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6],ymm7[7]
5388 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm7 = ymm12[0],ymm14[0],ymm12[1],ymm14[1],ymm12[4],ymm14[4],ymm12[5],ymm14[5]
5389 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm7[4,5],ymm4[6,7]
5390 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
5391 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload
5392 ; AVX2-FAST-PERLANE-NEXT: # xmm7 = xmm3[3,3],mem[3,3]
5393 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
5394 ; AVX2-FAST-PERLANE-NEXT: # xmm7 = xmm7[0,1,2],mem[3]
5395 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm7[1,2,3],ymm4[4,5,6,7]
5396 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm4 # 32-byte Folded Reload
5397 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = ymm9[2],mem[2],ymm9[3],mem[3],ymm9[6],mem[6],ymm9[7],mem[7]
5398 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm7 = ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[6],ymm12[6],ymm14[7],ymm12[7]
5399 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[3,3,3,3]
5400 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
5401 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2],ymm4[3,4,5,6,7]
5402 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5403 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
5404 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
5405 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm12 = mem[2,3,2,3,6,7,6,7]
5406 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm12[0],ymm7[1,2],ymm12[3,4],ymm7[5,6],ymm12[7]
5407 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3]
5408 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0],ymm4[1,2,3,4],ymm7[5,6,7]
5409 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 80(%rdx), %ymm7
5410 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm12 = ymm15[3,1,2,0,7,5,6,4]
5411 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm12[0,1,2,3,4,5],ymm7[6],ymm12[7]
5412 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
5413 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5414 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm12 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
5415 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm12[4,5],ymm7[6,7]
5416 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5417 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm12 # 16-byte Folded Reload
5418 ; AVX2-FAST-PERLANE-NEXT: # xmm12 = xmm5[3,3],mem[3,3]
5419 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
5420 ; AVX2-FAST-PERLANE-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
5421 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm12[1,2,3],ymm7[4,5,6,7]
5422 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm12 # 32-byte Folded Reload
5423 ; AVX2-FAST-PERLANE-NEXT: # ymm12 = ymm15[2],mem[2],ymm15[3],mem[3],ymm15[6],mem[6],ymm15[7],mem[7]
5424 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm13 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
5425 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[3,3,3,3]
5426 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[3,3,3,3]
5427 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
5428 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5429 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
5430 ; AVX2-FAST-PERLANE-NEXT: # ymm13 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
5431 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm15 = mem[2,3,2,3,6,7,6,7]
5432 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0],ymm13[1,2],ymm15[3,4],ymm13[5,6],ymm15[7]
5433 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,2,3]
5434 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0],ymm12[1,2,3,4],ymm13[5,6,7]
5435 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
5436 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm12, 640(%rax)
5437 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, 544(%rax)
5438 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 416(%rax)
5439 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 320(%rax)
5440 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm10, 192(%rax)
5441 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 96(%rax)
5442 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5443 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 608(%rax)
5444 ; AVX2-FAST-PERLANE-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
5445 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 576(%rax)
5446 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
5447 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 384(%rax)
5448 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5449 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 352(%rax)
5450 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
5451 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 160(%rax)
5452 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5453 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%rax)
5454 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5455 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 864(%rax)
5456 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5457 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 832(%rax)
5458 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5459 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 768(%rax)
5460 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5461 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 736(%rax)
5462 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5463 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 704(%rax)
5464 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5465 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 672(%rax)
5466 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5467 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 512(%rax)
5468 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5469 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 480(%rax)
5470 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5471 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 448(%rax)
5472 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5473 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 288(%rax)
5474 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5475 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rax)
5476 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5477 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax)
5478 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5479 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rax)
5480 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5481 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
5482 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5483 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
5484 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5485 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 800(%rax)
5486 ; AVX2-FAST-PERLANE-NEXT: addq $1320, %rsp # imm = 0x528
5487 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
5488 ; AVX2-FAST-PERLANE-NEXT: retq
5489 ;
5490 ; AVX512F-LABEL: store_i32_stride7_vf32:
5491 ; AVX512F: # %bb.0:
5492 ; AVX512F-NEXT: pushq %rax
5493 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
5494 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm14
5495 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm18
5496 ; AVX512F-NEXT: vmovdqa64 (%rsi), %zmm8
5497 ; AVX512F-NEXT: vmovdqa64 64(%rsi), %zmm12
5498 ; AVX512F-NEXT: vmovdqa64 (%rdx), %zmm1
5499 ; AVX512F-NEXT: vmovdqa64 64(%rdx), %zmm23
5500 ; AVX512F-NEXT: vmovdqa64 (%rcx), %zmm9
5501 ; AVX512F-NEXT: vmovdqa64 64(%rcx), %zmm13
5502 ; AVX512F-NEXT: vmovdqa64 (%r8), %zmm4
5503 ; AVX512F-NEXT: vmovdqa64 64(%r8), %zmm22
5504 ; AVX512F-NEXT: vmovdqa64 (%r9), %zmm10
5505 ; AVX512F-NEXT: vmovdqa64 64(%r9), %zmm26
5506 ; AVX512F-NEXT: vmovdqa64 (%rax), %zmm15
5507 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
5508 ; AVX512F-NEXT: vmovdqa64 %zmm23, %zmm0
5509 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm21, %zmm0
5510 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
5511 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm3
5512 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm6, %zmm3
5513 ; AVX512F-NEXT: movw $6192, %cx # imm = 0x1830
5514 ; AVX512F-NEXT: kmovw %ecx, %k1
5515 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
5516 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,3,4,5,30,u,u,9,10,11,12,31,u,u>
5517 ; AVX512F-NEXT: vpermi2d %zmm22, %zmm3, %zmm0
5518 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5519 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm20 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
5520 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm0
5521 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm20, %zmm0
5522 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
5523 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm3
5524 ; AVX512F-NEXT: vpermt2d %zmm23, %zmm11, %zmm3
5525 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
5526 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,27,u,u,4,5,6,7,28,u,u,11,12,13,14,29>
5527 ; AVX512F-NEXT: vpermi2d %zmm22, %zmm3, %zmm0
5528 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5529 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm28 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
5530 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0
5531 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm28, %zmm0
5532 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm25 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
5533 ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm7
5534 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm25, %zmm7
5535 ; AVX512F-NEXT: movw $1548, %cx # imm = 0x60C
5536 ; AVX512F-NEXT: kmovw %ecx, %k2
5537 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm7 {%k2}
5538 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
5539 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm5
5540 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm0, %zmm5
5541 ; AVX512F-NEXT: vmovdqa64 %zmm23, %zmm30
5542 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm17 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
5543 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm16
5544 ; AVX512F-NEXT: vpermt2d %zmm18, %zmm17, %zmm16
5545 ; AVX512F-NEXT: vmovdqa64 %zmm23, %zmm27
5546 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm3
5547 ; AVX512F-NEXT: vmovdqa64 %zmm23, %zmm31
5548 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm24
5549 ; AVX512F-NEXT: vmovdqa64 %zmm23, %zmm19
5550 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm28, %zmm23
5551 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm29
5552 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm25, %zmm18
5553 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
5554 ; AVX512F-NEXT: vpermt2d %zmm15, %zmm2, %zmm5
5555 ; AVX512F-NEXT: movw $14448, %cx # imm = 0x3870
5556 ; AVX512F-NEXT: vmovdqa32 %zmm23, %zmm18 {%k2}
5557 ; AVX512F-NEXT: kmovw %ecx, %k2
5558 ; AVX512F-NEXT: vmovdqa32 %zmm5, %zmm7 {%k2}
5559 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm5
5560 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm0
5561 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm23
5562 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm28
5563 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm25 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
5564 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm25, %zmm22
5565 ; AVX512F-NEXT: vmovdqa64 64(%rax), %zmm25
5566 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm2, %zmm22
5567 ; AVX512F-NEXT: vmovdqa32 %zmm22, %zmm18 {%k2}
5568 ; AVX512F-NEXT: vpermi2d %zmm9, %zmm1, %zmm21
5569 ; AVX512F-NEXT: vpermi2d %zmm8, %zmm14, %zmm6
5570 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm6 {%k1}
5571 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
5572 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm10, %zmm2
5573 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm21 = <0,29,u,u,u,u,6,7,30,u,u,u,u,13,14,31>
5574 ; AVX512F-NEXT: vpermi2d %zmm15, %zmm2, %zmm21
5575 ; AVX512F-NEXT: movw $-7741, %ax # imm = 0xE1C3
5576 ; AVX512F-NEXT: kmovw %eax, %k2
5577 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm6 {%k2}
5578 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
5579 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm2, %zmm30
5580 ; AVX512F-NEXT: movw $-31994, %ax # imm = 0x8306
5581 ; AVX512F-NEXT: kmovw %eax, %k2
5582 ; AVX512F-NEXT: vmovdqa32 %zmm30, %zmm16 {%k2}
5583 ; AVX512F-NEXT: vpermi2d %zmm8, %zmm14, %zmm20
5584 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm9, %zmm11
5585 ; AVX512F-NEXT: vmovdqa32 %zmm20, %zmm11 {%k1}
5586 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm20 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
5587 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm4, %zmm20
5588 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
5589 ; AVX512F-NEXT: vpermi2d %zmm15, %zmm20, %zmm21
5590 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm20 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
5591 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm20, %zmm5
5592 ; AVX512F-NEXT: movw $-30962, %ax # imm = 0x870E
5593 ; AVX512F-NEXT: kmovw %eax, %k1
5594 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm11 {%k1}
5595 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
5596 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm21, %zmm5
5597 ; AVX512F-NEXT: movw $7224, %ax # imm = 0x1C38
5598 ; AVX512F-NEXT: kmovw %eax, %k1
5599 ; AVX512F-NEXT: vmovdqa32 %zmm5, %zmm16 {%k1}
5600 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm22 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
5601 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm22, %zmm27
5602 ; AVX512F-NEXT: vpermi2d %zmm9, %zmm1, %zmm2
5603 ; AVX512F-NEXT: vpermi2d %zmm14, %zmm8, %zmm17
5604 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm17 {%k2}
5605 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm5 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
5606 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm5, %zmm3
5607 ; AVX512F-NEXT: movw $3096, %ax # imm = 0xC18
5608 ; AVX512F-NEXT: kmovw %eax, %k2
5609 ; AVX512F-NEXT: vmovdqa32 %zmm27, %zmm3 {%k2}
5610 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm4, %zmm20
5611 ; AVX512F-NEXT: vpermt2d %zmm15, %zmm21, %zmm20
5612 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
5613 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm2, %zmm0
5614 ; AVX512F-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
5615 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm20 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
5616 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm20, %zmm0
5617 ; AVX512F-NEXT: movw $28897, %ax # imm = 0x70E1
5618 ; AVX512F-NEXT: kmovw %eax, %k3
5619 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm3 {%k3}
5620 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
5621 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm0, %zmm31
5622 ; AVX512F-NEXT: vpermi2d %zmm9, %zmm1, %zmm22
5623 ; AVX512F-NEXT: vpermi2d %zmm8, %zmm14, %zmm5
5624 ; AVX512F-NEXT: vmovdqa32 %zmm22, %zmm5 {%k2}
5625 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
5626 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm21, %zmm24
5627 ; AVX512F-NEXT: movw $12384, %ax # imm = 0x3060
5628 ; AVX512F-NEXT: kmovw %eax, %k1
5629 ; AVX512F-NEXT: vmovdqa32 %zmm31, %zmm24 {%k1}
5630 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm4, %zmm2
5631 ; AVX512F-NEXT: vpermt2d %zmm15, %zmm20, %zmm2
5632 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm20 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
5633 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm20, %zmm23
5634 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm5 {%k3}
5635 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
5636 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm2, %zmm23
5637 ; AVX512F-NEXT: movw $15480, %ax # imm = 0x3C78
5638 ; AVX512F-NEXT: kmovw %eax, %k2
5639 ; AVX512F-NEXT: vmovdqa32 %zmm24, %zmm23 {%k2}
5640 ; AVX512F-NEXT: vpermi2d %zmm9, %zmm1, %zmm0
5641 ; AVX512F-NEXT: vpermi2d %zmm8, %zmm14, %zmm21
5642 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm21 {%k1}
5643 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm4, %zmm20
5644 ; AVX512F-NEXT: vpermt2d %zmm15, %zmm2, %zmm20
5645 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm20 {%k2}
5646 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
5647 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm0, %zmm29
5648 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm0, %zmm14
5649 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
5650 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm0, %zmm19
5651 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm0, %zmm1
5652 ; AVX512F-NEXT: vmovdqa32 %zmm29, %zmm19 {%k1}
5653 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
5654 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm0, %zmm4
5655 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm0, %zmm28
5656 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
5657 ; AVX512F-NEXT: vpermt2d %zmm15, %zmm0, %zmm4
5658 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm0, %zmm28
5659 ; AVX512F-NEXT: vmovdqa32 %zmm14, %zmm1 {%k1}
5660 ; AVX512F-NEXT: movw $3612, %ax # imm = 0xE1C
5661 ; AVX512F-NEXT: kmovw %eax, %k1
5662 ; AVX512F-NEXT: vmovdqa32 %zmm28, %zmm19 {%k1}
5663 ; AVX512F-NEXT: vmovdqa32 %zmm4, %zmm1 {%k1}
5664 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <29,u,2,3,4,5,6,30,u,9,10,11,12,13,31,u>
5665 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
5666 ; AVX512F-NEXT: vpermi2d %zmm26, %zmm2, %zmm0
5667 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,27,u,4,5,6,7,8,28,u,11,12,13,14,15>
5668 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
5669 ; AVX512F-NEXT: vpermi2d %zmm26, %zmm4, %zmm2
5670 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
5671 ; AVX512F-NEXT: vpermi2d %zmm25, %zmm0, %zmm4
5672 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,27,4,5,6,7,8,9,28,11,12,13,14,15]
5673 ; AVX512F-NEXT: vpermi2d %zmm25, %zmm2, %zmm0
5674 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
5675 ; AVX512F-NEXT: vmovdqa64 %zmm1, 64(%rax)
5676 ; AVX512F-NEXT: vmovdqa64 %zmm20, 128(%rax)
5677 ; AVX512F-NEXT: vmovdqa64 %zmm5, 192(%rax)
5678 ; AVX512F-NEXT: vmovdqa64 %zmm17, 256(%rax)
5679 ; AVX512F-NEXT: vmovdqa64 %zmm11, 320(%rax)
5680 ; AVX512F-NEXT: vmovdqa64 %zmm6, 384(%rax)
5681 ; AVX512F-NEXT: vmovdqa64 %zmm18, 448(%rax)
5682 ; AVX512F-NEXT: vmovdqa64 %zmm19, 512(%rax)
5683 ; AVX512F-NEXT: vmovdqa64 %zmm23, 576(%rax)
5684 ; AVX512F-NEXT: vmovdqa64 %zmm3, 640(%rax)
5685 ; AVX512F-NEXT: vmovdqa64 %zmm16, 704(%rax)
5686 ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rax)
5687 ; AVX512F-NEXT: vmovdqa64 %zmm0, 768(%rax)
5688 ; AVX512F-NEXT: vmovdqa64 %zmm4, 832(%rax)
5689 ; AVX512F-NEXT: popq %rax
5690 ; AVX512F-NEXT: vzeroupper
5691 ; AVX512F-NEXT: retq
5692 ;
5693 ; AVX512BW-LABEL: store_i32_stride7_vf32:
5694 ; AVX512BW: # %bb.0:
5695 ; AVX512BW-NEXT: pushq %rax
5696 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
5697 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm14
5698 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm18
5699 ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm8
5700 ; AVX512BW-NEXT: vmovdqa64 64(%rsi), %zmm12
5701 ; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm1
5702 ; AVX512BW-NEXT: vmovdqa64 64(%rdx), %zmm23
5703 ; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm9
5704 ; AVX512BW-NEXT: vmovdqa64 64(%rcx), %zmm13
5705 ; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm4
5706 ; AVX512BW-NEXT: vmovdqa64 64(%r8), %zmm22
5707 ; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm10
5708 ; AVX512BW-NEXT: vmovdqa64 64(%r9), %zmm26
5709 ; AVX512BW-NEXT: vmovdqa64 (%rax), %zmm15
5710 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
5711 ; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm0
5712 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm21, %zmm0
5713 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
5714 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm3
5715 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm6, %zmm3
5716 ; AVX512BW-NEXT: movw $6192, %cx # imm = 0x1830
5717 ; AVX512BW-NEXT: kmovd %ecx, %k1
5718 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
5719 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,3,4,5,30,u,u,9,10,11,12,31,u,u>
5720 ; AVX512BW-NEXT: vpermi2d %zmm22, %zmm3, %zmm0
5721 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5722 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
5723 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm0
5724 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm20, %zmm0
5725 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
5726 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm3
5727 ; AVX512BW-NEXT: vpermt2d %zmm23, %zmm11, %zmm3
5728 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
5729 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,27,u,u,4,5,6,7,28,u,u,11,12,13,14,29>
5730 ; AVX512BW-NEXT: vpermi2d %zmm22, %zmm3, %zmm0
5731 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5732 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm28 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
5733 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
5734 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm28, %zmm0
5735 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
5736 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm7
5737 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm25, %zmm7
5738 ; AVX512BW-NEXT: movw $1548, %cx # imm = 0x60C
5739 ; AVX512BW-NEXT: kmovd %ecx, %k2
5740 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm7 {%k2}
5741 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
5742 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm5
5743 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm0, %zmm5
5744 ; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm30
5745 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm17 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
5746 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm16
5747 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm17, %zmm16
5748 ; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm27
5749 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm3
5750 ; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm31
5751 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm24
5752 ; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm19
5753 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm28, %zmm23
5754 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm29
5755 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm25, %zmm18
5756 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
5757 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm2, %zmm5
5758 ; AVX512BW-NEXT: movw $14448, %cx # imm = 0x3870
5759 ; AVX512BW-NEXT: vmovdqa32 %zmm23, %zmm18 {%k2}
5760 ; AVX512BW-NEXT: kmovd %ecx, %k2
5761 ; AVX512BW-NEXT: vmovdqa32 %zmm5, %zmm7 {%k2}
5762 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm5
5763 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm0
5764 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm23
5765 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm28
5766 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
5767 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm25, %zmm22
5768 ; AVX512BW-NEXT: vmovdqa64 64(%rax), %zmm25
5769 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm2, %zmm22
5770 ; AVX512BW-NEXT: vmovdqa32 %zmm22, %zmm18 {%k2}
5771 ; AVX512BW-NEXT: vpermi2d %zmm9, %zmm1, %zmm21
5772 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm14, %zmm6
5773 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm6 {%k1}
5774 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
5775 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm10, %zmm2
5776 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = <0,29,u,u,u,u,6,7,30,u,u,u,u,13,14,31>
5777 ; AVX512BW-NEXT: vpermi2d %zmm15, %zmm2, %zmm21
5778 ; AVX512BW-NEXT: movw $-7741, %ax # imm = 0xE1C3
5779 ; AVX512BW-NEXT: kmovd %eax, %k2
5780 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm6 {%k2}
5781 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
5782 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm2, %zmm30
5783 ; AVX512BW-NEXT: movw $-31994, %ax # imm = 0x8306
5784 ; AVX512BW-NEXT: kmovd %eax, %k2
5785 ; AVX512BW-NEXT: vmovdqa32 %zmm30, %zmm16 {%k2}
5786 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm14, %zmm20
5787 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm9, %zmm11
5788 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm11 {%k1}
5789 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
5790 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm4, %zmm20
5791 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
5792 ; AVX512BW-NEXT: vpermi2d %zmm15, %zmm20, %zmm21
5793 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
5794 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm20, %zmm5
5795 ; AVX512BW-NEXT: movw $-30962, %ax # imm = 0x870E
5796 ; AVX512BW-NEXT: kmovd %eax, %k1
5797 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm11 {%k1}
5798 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
5799 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm21, %zmm5
5800 ; AVX512BW-NEXT: movw $7224, %ax # imm = 0x1C38
5801 ; AVX512BW-NEXT: kmovd %eax, %k1
5802 ; AVX512BW-NEXT: vmovdqa32 %zmm5, %zmm16 {%k1}
5803 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
5804 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm22, %zmm27
5805 ; AVX512BW-NEXT: vpermi2d %zmm9, %zmm1, %zmm2
5806 ; AVX512BW-NEXT: vpermi2d %zmm14, %zmm8, %zmm17
5807 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm17 {%k2}
5808 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
5809 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm5, %zmm3
5810 ; AVX512BW-NEXT: movw $3096, %ax # imm = 0xC18
5811 ; AVX512BW-NEXT: kmovd %eax, %k2
5812 ; AVX512BW-NEXT: vmovdqa32 %zmm27, %zmm3 {%k2}
5813 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm4, %zmm20
5814 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm21, %zmm20
5815 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
5816 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm2, %zmm0
5817 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm17 {%k1}
5818 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
5819 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm20, %zmm0
5820 ; AVX512BW-NEXT: movw $28897, %ax # imm = 0x70E1
5821 ; AVX512BW-NEXT: kmovd %eax, %k3
5822 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm3 {%k3}
5823 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
5824 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm0, %zmm31
5825 ; AVX512BW-NEXT: vpermi2d %zmm9, %zmm1, %zmm22
5826 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm14, %zmm5
5827 ; AVX512BW-NEXT: vmovdqa32 %zmm22, %zmm5 {%k2}
5828 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
5829 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm21, %zmm24
5830 ; AVX512BW-NEXT: movw $12384, %ax # imm = 0x3060
5831 ; AVX512BW-NEXT: kmovd %eax, %k1
5832 ; AVX512BW-NEXT: vmovdqa32 %zmm31, %zmm24 {%k1}
5833 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm4, %zmm2
5834 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm20, %zmm2
5835 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
5836 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm20, %zmm23
5837 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm5 {%k3}
5838 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
5839 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm2, %zmm23
5840 ; AVX512BW-NEXT: movw $15480, %ax # imm = 0x3C78
5841 ; AVX512BW-NEXT: kmovd %eax, %k2
5842 ; AVX512BW-NEXT: vmovdqa32 %zmm24, %zmm23 {%k2}
5843 ; AVX512BW-NEXT: vpermi2d %zmm9, %zmm1, %zmm0
5844 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm14, %zmm21
5845 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm21 {%k1}
5846 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm4, %zmm20
5847 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm2, %zmm20
5848 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm20 {%k2}
5849 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
5850 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm0, %zmm29
5851 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm0, %zmm14
5852 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
5853 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm0, %zmm19
5854 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm0, %zmm1
5855 ; AVX512BW-NEXT: vmovdqa32 %zmm29, %zmm19 {%k1}
5856 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
5857 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm0, %zmm4
5858 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm0, %zmm28
5859 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
5860 ; AVX512BW-NEXT: vpermt2d %zmm15, %zmm0, %zmm4
5861 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm0, %zmm28
5862 ; AVX512BW-NEXT: vmovdqa32 %zmm14, %zmm1 {%k1}
5863 ; AVX512BW-NEXT: movw $3612, %ax # imm = 0xE1C
5864 ; AVX512BW-NEXT: kmovd %eax, %k1
5865 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm19 {%k1}
5866 ; AVX512BW-NEXT: vmovdqa32 %zmm4, %zmm1 {%k1}
5867 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <29,u,2,3,4,5,6,30,u,9,10,11,12,13,31,u>
5868 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
5869 ; AVX512BW-NEXT: vpermi2d %zmm26, %zmm2, %zmm0
5870 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,27,u,4,5,6,7,8,28,u,11,12,13,14,15>
5871 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
5872 ; AVX512BW-NEXT: vpermi2d %zmm26, %zmm4, %zmm2
5873 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
5874 ; AVX512BW-NEXT: vpermi2d %zmm25, %zmm0, %zmm4
5875 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,27,4,5,6,7,8,9,28,11,12,13,14,15]
5876 ; AVX512BW-NEXT: vpermi2d %zmm25, %zmm2, %zmm0
5877 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
5878 ; AVX512BW-NEXT: vmovdqa64 %zmm1, 64(%rax)
5879 ; AVX512BW-NEXT: vmovdqa64 %zmm20, 128(%rax)
5880 ; AVX512BW-NEXT: vmovdqa64 %zmm5, 192(%rax)
5881 ; AVX512BW-NEXT: vmovdqa64 %zmm17, 256(%rax)
5882 ; AVX512BW-NEXT: vmovdqa64 %zmm11, 320(%rax)
5883 ; AVX512BW-NEXT: vmovdqa64 %zmm6, 384(%rax)
5884 ; AVX512BW-NEXT: vmovdqa64 %zmm18, 448(%rax)
5885 ; AVX512BW-NEXT: vmovdqa64 %zmm19, 512(%rax)
5886 ; AVX512BW-NEXT: vmovdqa64 %zmm23, 576(%rax)
5887 ; AVX512BW-NEXT: vmovdqa64 %zmm3, 640(%rax)
5888 ; AVX512BW-NEXT: vmovdqa64 %zmm16, 704(%rax)
5889 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rax)
5890 ; AVX512BW-NEXT: vmovdqa64 %zmm0, 768(%rax)
5891 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 832(%rax)
5892 ; AVX512BW-NEXT: popq %rax
5893 ; AVX512BW-NEXT: vzeroupper
5894 ; AVX512BW-NEXT: retq
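; Load the seven <32 x i32> source vectors and concatenate them with shufflevectors so they can be interleaved for the stride-7 store.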
5895 %in.vec0 = load <32 x i32>, ptr %in.vecptr0, align 64
5896 %in.vec1 = load <32 x i32>, ptr %in.vecptr1, align 64
5897 %in.vec2 = load <32 x i32>, ptr %in.vecptr2, align 64
5898 %in.vec3 = load <32 x i32>, ptr %in.vecptr3, align 64
5899 %in.vec4 = load <32 x i32>, ptr %in.vecptr4, align 64
5900 %in.vec5 = load <32 x i32>, ptr %in.vecptr5, align 64
5901 %in.vec6 = load <32 x i32>, ptr %in.vecptr6, align 64
5902 %1 = shufflevector <32 x i32> %in.vec0, <32 x i32> %in.vec1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
5903 %2 = shufflevector <32 x i32> %in.vec2, <32 x i32> %in.vec3, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
5904 %3 = shufflevector <32 x i32> %in.vec4, <32 x i32> %in.vec5, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
5905 %4 = shufflevector <64 x i32> %1, <64 x i32> %2, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
5906 %5 = shufflevector <32 x i32> %in.vec6, <32 x i32> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5907 %6 = shufflevector <64 x i32> %3, <64 x i32> %5, <96 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
5908 %7 = shufflevector <96 x i32> %6, <96 x i32> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5909 %8 = shufflevector <128 x i32> %4, <128 x i32> %7, <224 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223>
5910 %interleaved.vec = shufflevector <224 x i32> %8, <224 x i32> poison, <224 x i32> <i32 0, i32 32, i32 64, i32 96, i32 128, i32 160, i32 192, i32 1, i32 33, i32 65, i32 97, i32 129, i32 161, i32 193, i32 2, i32 34, i32 66, i32 98, i32 130, i32 162, i32 194, i32 3, i32 35, i32 67, i32 99, i32 131, i32 163, i32 195, i32 4, i32 36, i32 68, i32 100, i32 132, i32 164, i32 196, i32 5, i32 37, i32 69, i32 101, i32 133, i32 165, i32 197, i32 6, i32 38, i32 70, i32 102, i32 134, i32 166, i32 198, i32 7, i32 39, i32 71, i32 103, i32 135, i32 167, i32 199, i32 8, i32 40, i32 72, i32 104, i32 136, i32 168, i32 200, i32 9, i32 41, i32 73, i32 105, i32 137, i32 169, i32 201, i32 10, i32 42, i32 74, i32 106, i32 138, i32 170, i32 202, i32 11, i32 43, i32 75, i32 107, i32 139, i32 171, i32 203, i32 12, i32 44, i32 76, i32 108, i32 140, i32 172, i32 204, i32 13, i32 45, i32 77, i32 109, i32 141, i32 173, i32 205, i32 14, i32 46, i32 78, i32 110, i32 142, i32 174, i32 206, i32 15, i32 47, i32 79, i32 111, i32 143, i32 175, i32 207, i32 16, i32 48, i32 80, i32 112, i32 144, i32 176, i32 208, i32 17, i32 49, i32 81, i32 113, i32 145, i32 177, i32 209, i32 18, i32 50, i32 82, i32 114, i32 146, i32 178, i32 210, i32 19, i32 51, i32 83, i32 115, i32 147, i32 179, i32 211, i32 20, i32 52, i32 84, i32 116, i32 148, i32 180, i32 212, i32 21, i32 53, i32 85, i32 117, i32 149, i32 181, i32 213, i32 22, i32 54, i32 86, i32 118, i32 150, i32 182, i32 214, i32 23, i32 55, i32 87, i32 119, i32 151, i32 183, i32 215, i32 24, i32 56, i32 88, i32 120, i32 152, i32 184, i32 216, i32 25, i32 57, i32 89, i32 121, i32 153, i32 185, i32 217, i32 26, i32 58, i32 90, i32 122, i32 154, i32 186, i32 218, i32 27, i32 59, i32 91, i32 123, i32 155, i32 187, i32 219, i32 28, i32 60, i32 92, i32 124, i32 156, i32 188, i32 220, i32 29, i32 61, i32 93, i32 125, i32 157, i32 189, i32 221, i32 30, i32 62, i32 94, i32 126, i32 158, i32 190, i32 222, i32 31, i32 63, i32 95, i32 127, i32 159, i32 191, i32 223>
5911 store <224 x i32> %interleaved.vec, ptr %out.vec, align 64
5912 ret void
5913 }
5915 define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
5916 ; SSE-LABEL: store_i32_stride7_vf64:
5917 ; SSE:       # %bb.0:
5918 ; SSE-NEXT: subq $2760, %rsp # imm = 0xAC8
5919 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
5920 ; SSE-NEXT: movdqa (%rdi), %xmm6
5921 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5922 ; SSE-NEXT: movdqa (%rsi), %xmm4
5923 ; SSE-NEXT: movdqa 16(%rsi), %xmm3
5924 ; SSE-NEXT: movaps (%rdx), %xmm2
5925 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5926 ; SSE-NEXT: movdqa 16(%rdx), %xmm7
5927 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5928 ; SSE-NEXT: movaps (%rcx), %xmm13
5929 ; SSE-NEXT: movaps 16(%rcx), %xmm9
5930 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5931 ; SSE-NEXT: movaps (%r8), %xmm0
5932 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5933 ; SSE-NEXT: movaps 16(%r8), %xmm10
5934 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5935 ; SSE-NEXT: movdqa (%r9), %xmm12
5936 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5937 ; SSE-NEXT: movdqa 16(%r9), %xmm8
5938 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5939 ; SSE-NEXT: movdqa (%rax), %xmm15
5940 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm13[1,1]
5941 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5942 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
5943 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
5944 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
5945 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5946 ; SSE-NEXT: movdqa %xmm6, %xmm0
5947 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
5948 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[1,1,1,1]
5949 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5950 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[1,1,1,1]
5951 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
5952 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
5953 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5954 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
5955 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5956 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
5957 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5958 ; SSE-NEXT: movaps %xmm10, %xmm0
5959 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm9[1,1]
5960 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
5961 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5962 ; SSE-NEXT: movdqa 16(%rax), %xmm0
5963 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5964 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
5965 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,1,1]
5966 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5967 ; SSE-NEXT: movdqa 16(%rdi), %xmm0
5968 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5969 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
5970 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
5971 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5972 ; SSE-NEXT: movdqa 32(%rsi), %xmm1
5973 ; SSE-NEXT: movaps 32(%rdx), %xmm3
5974 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5975 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
5976 ; SSE-NEXT: movdqa %xmm1, %xmm2
5977 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5978 ; SSE-NEXT: movaps %xmm3, %xmm1
5979 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
5980 ; SSE-NEXT: movaps 32(%rcx), %xmm3
5981 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5982 ; SSE-NEXT: movaps 32(%r8), %xmm0
5983 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5984 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm3[1,1]
5985 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
5986 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5987 ; SSE-NEXT: movdqa 32(%r9), %xmm1
5988 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5989 ; SSE-NEXT: movdqa 32(%rax), %xmm0
5990 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5991 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
5992 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
5993 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5994 ; SSE-NEXT: movdqa 32(%rdi), %xmm0
5995 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5996 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
5997 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
5998 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5999 ; SSE-NEXT: movdqa 48(%rsi), %xmm2
6000 ; SSE-NEXT: movdqa 48(%rdx), %xmm0
6001 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6002 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6003 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6004 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6005 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6006 ; SSE-NEXT: movaps 48(%rcx), %xmm4
6007 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6008 ; SSE-NEXT: movaps 48(%r8), %xmm0
6009 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6010 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6011 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6012 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6013 ; SSE-NEXT: movdqa 48(%r9), %xmm1
6014 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6015 ; SSE-NEXT: movdqa 48(%rax), %xmm0
6016 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6017 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6018 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6019 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6020 ; SSE-NEXT: movdqa 48(%rdi), %xmm5
6021 ; SSE-NEXT: movdqa %xmm5, %xmm0
6022 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6023 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6024 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6025 ; SSE-NEXT: movdqa 64(%rsi), %xmm1
6026 ; SSE-NEXT: movaps 64(%rdx), %xmm4
6027 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6028 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6029 ; SSE-NEXT: movdqa %xmm1, %xmm2
6030 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6031 ; SSE-NEXT: movaps %xmm4, %xmm1
6032 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6033 ; SSE-NEXT: movaps 64(%rcx), %xmm4
6034 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6035 ; SSE-NEXT: movaps 64(%r8), %xmm0
6036 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6037 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6038 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6039 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6040 ; SSE-NEXT: movdqa 64(%r9), %xmm1
6041 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6042 ; SSE-NEXT: movdqa 64(%rax), %xmm0
6043 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6044 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6045 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6046 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6047 ; SSE-NEXT: movdqa 64(%rdi), %xmm8
6048 ; SSE-NEXT: movdqa %xmm8, %xmm0
6049 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6050 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6051 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6052 ; SSE-NEXT: movdqa 80(%rsi), %xmm2
6053 ; SSE-NEXT: movdqa 80(%rdx), %xmm0
6054 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6055 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6056 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6057 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6058 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6059 ; SSE-NEXT: movaps 80(%rcx), %xmm4
6060 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6061 ; SSE-NEXT: movaps 80(%r8), %xmm0
6062 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6063 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6064 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6065 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6066 ; SSE-NEXT: movdqa 80(%r9), %xmm1
6067 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6068 ; SSE-NEXT: movdqa 80(%rax), %xmm0
6069 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6070 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6071 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6072 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6073 ; SSE-NEXT: movdqa 80(%rdi), %xmm9
6074 ; SSE-NEXT: movdqa %xmm9, %xmm0
6075 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6076 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6077 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6078 ; SSE-NEXT: movdqa 96(%rsi), %xmm1
6079 ; SSE-NEXT: movaps 96(%rdx), %xmm4
6080 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6081 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6082 ; SSE-NEXT: movdqa %xmm1, %xmm2
6083 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6084 ; SSE-NEXT: movaps %xmm4, %xmm1
6085 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6086 ; SSE-NEXT: movaps 96(%rcx), %xmm10
6087 ; SSE-NEXT: movaps 96(%r8), %xmm0
6088 ; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
6089 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm10[1,1]
6090 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6091 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6092 ; SSE-NEXT: movdqa 96(%r9), %xmm1
6093 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6094 ; SSE-NEXT: movdqa 96(%rax), %xmm0
6095 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6096 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6097 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6098 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6099 ; SSE-NEXT: movdqa 96(%rdi), %xmm12
6100 ; SSE-NEXT: movdqa %xmm12, %xmm0
6101 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6102 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6103 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6104 ; SSE-NEXT: movdqa 112(%rsi), %xmm2
6105 ; SSE-NEXT: movdqa 112(%rdx), %xmm0
6106 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6107 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6108 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6109 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6110 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6111 ; SSE-NEXT: movaps 112(%rcx), %xmm4
6112 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6113 ; SSE-NEXT: movaps 112(%r8), %xmm0
6114 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6115 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6116 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6117 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6118 ; SSE-NEXT: movdqa 112(%r9), %xmm1
6119 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6120 ; SSE-NEXT: movdqa 112(%rax), %xmm0
6121 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6122 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6123 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6124 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6125 ; SSE-NEXT: movdqa 112(%rdi), %xmm14
6126 ; SSE-NEXT: movdqa %xmm14, %xmm0
6127 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6128 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6129 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6130 ; SSE-NEXT: movdqa 128(%rsi), %xmm1
6131 ; SSE-NEXT: movaps 128(%rdx), %xmm4
6132 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6133 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6134 ; SSE-NEXT: movdqa %xmm1, %xmm2
6135 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6136 ; SSE-NEXT: movaps %xmm4, %xmm1
6137 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6138 ; SSE-NEXT: movaps 128(%rcx), %xmm4
6139 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6140 ; SSE-NEXT: movaps 128(%r8), %xmm0
6141 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6142 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6143 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6144 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6145 ; SSE-NEXT: movdqa 128(%r9), %xmm1
6146 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6147 ; SSE-NEXT: movdqa 128(%rax), %xmm0
6148 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6149 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6150 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6151 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6152 ; SSE-NEXT: movdqa 128(%rdi), %xmm0
6153 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6154 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6155 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6156 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6157 ; SSE-NEXT: movdqa 144(%rsi), %xmm2
6158 ; SSE-NEXT: movdqa 144(%rdx), %xmm0
6159 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6160 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6161 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6162 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6163 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6164 ; SSE-NEXT: movaps 144(%rcx), %xmm4
6165 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6166 ; SSE-NEXT: movaps 144(%r8), %xmm0
6167 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6168 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6169 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6170 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6171 ; SSE-NEXT: movdqa 144(%r9), %xmm1
6172 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6173 ; SSE-NEXT: movdqa 144(%rax), %xmm0
6174 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6175 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6176 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6177 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6178 ; SSE-NEXT: movdqa 144(%rdi), %xmm0
6179 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6180 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6181 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6182 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6183 ; SSE-NEXT: movdqa 160(%rsi), %xmm1
6184 ; SSE-NEXT: movaps 160(%rdx), %xmm4
6185 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6186 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6187 ; SSE-NEXT: movdqa %xmm1, %xmm2
6188 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6189 ; SSE-NEXT: movaps %xmm4, %xmm1
6190 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6191 ; SSE-NEXT: movaps 160(%rcx), %xmm4
6192 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6193 ; SSE-NEXT: movaps 160(%r8), %xmm0
6194 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6195 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6196 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6197 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6198 ; SSE-NEXT: movdqa 160(%r9), %xmm1
6199 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6200 ; SSE-NEXT: movdqa 160(%rax), %xmm0
6201 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6202 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6203 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6204 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6205 ; SSE-NEXT: movdqa 160(%rdi), %xmm0
6206 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6207 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6208 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6209 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6210 ; SSE-NEXT: movdqa 176(%rsi), %xmm2
6211 ; SSE-NEXT: movdqa 176(%rdx), %xmm0
6212 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6213 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6214 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6215 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6216 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6217 ; SSE-NEXT: movaps 176(%rcx), %xmm4
6218 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6219 ; SSE-NEXT: movaps 176(%r8), %xmm0
6220 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6221 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6222 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6223 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6224 ; SSE-NEXT: movdqa 176(%r9), %xmm1
6225 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6226 ; SSE-NEXT: movdqa 176(%rax), %xmm0
6227 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6228 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6229 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6230 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6231 ; SSE-NEXT: movdqa 176(%rdi), %xmm0
6232 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6233 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6234 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6235 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6236 ; SSE-NEXT: movdqa 192(%rsi), %xmm1
6237 ; SSE-NEXT: movaps 192(%rdx), %xmm4
6238 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6239 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6240 ; SSE-NEXT: movdqa %xmm1, %xmm2
6241 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6242 ; SSE-NEXT: movaps %xmm4, %xmm1
6243 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6244 ; SSE-NEXT: movaps 192(%rcx), %xmm4
6245 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6246 ; SSE-NEXT: movaps 192(%r8), %xmm0
6247 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6248 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6249 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6250 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6251 ; SSE-NEXT: movdqa 192(%r9), %xmm1
6252 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6253 ; SSE-NEXT: movdqa 192(%rax), %xmm0
6254 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6255 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6256 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6257 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6258 ; SSE-NEXT: movdqa 192(%rdi), %xmm0
6259 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6260 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6261 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6262 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6263 ; SSE-NEXT: movdqa 208(%rsi), %xmm3
6264 ; SSE-NEXT: movdqa 208(%rdx), %xmm0
6265 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6266 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6267 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
6268 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6269 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6270 ; SSE-NEXT: movaps 208(%rcx), %xmm6
6271 ; SSE-NEXT: movaps 208(%r8), %xmm4
6272 ; SSE-NEXT: movaps %xmm4, %xmm0
6273 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6274 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm6[1,1]
6275 ; SSE-NEXT: movaps %xmm6, %xmm2
6276 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6277 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6278 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6279 ; SSE-NEXT: movdqa 208(%r9), %xmm6
6280 ; SSE-NEXT: movdqa 208(%rax), %xmm1
6281 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6282 ; SSE-NEXT: movdqa %xmm1, %xmm7
6283 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6284 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,1,1]
6285 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6286 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6287 ; SSE-NEXT: movdqa 208(%rdi), %xmm0
6288 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6289 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6290 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6291 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6292 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,3,3,3]
6293 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[3,3,3,3]
6294 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6295 ; SSE-NEXT: movdqa %xmm7, %xmm0
6296 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm6[3,3]
6297 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6298 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6299 ; SSE-NEXT: movdqa 224(%rsi), %xmm0
6300 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6301 ; SSE-NEXT: movaps 224(%rdx), %xmm2
6302 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6303 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6304 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
6305 ; SSE-NEXT: movaps 224(%rcx), %xmm4
6306 ; SSE-NEXT: movaps 224(%r8), %xmm0
6307 ; SSE-NEXT: movaps %xmm0, %xmm1
6308 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
6309 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6310 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
6311 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6312 ; SSE-NEXT: movaps %xmm0, %xmm1
6313 ; SSE-NEXT: movaps 224(%r9), %xmm7
6314 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm7[0]
6315 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6316 ; SSE-NEXT: movaps %xmm7, %xmm1
6317 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6318 ; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm0[1]
6319 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
6320 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[3,3,3,3]
6321 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
6322 ; SSE-NEXT: movaps 224(%rax), %xmm0
6323 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6324 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
6325 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
6326 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6327 ; SSE-NEXT: movdqa 240(%rsi), %xmm1
6328 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6329 ; SSE-NEXT: movdqa 240(%rdx), %xmm0
6330 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6331 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6332 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
6333 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
6334 ; SSE-NEXT: movaps 240(%rcx), %xmm6
6335 ; SSE-NEXT: movaps 240(%r8), %xmm0
6336 ; SSE-NEXT: movaps %xmm0, %xmm1
6337 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm6[1,1]
6338 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
6339 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6340 ; SSE-NEXT: movaps %xmm0, %xmm1
6341 ; SSE-NEXT: movaps 240(%r9), %xmm11
6342 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm11[0]
6343 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6344 ; SSE-NEXT: movaps %xmm11, %xmm1
6345 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6346 ; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm0[1]
6347 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
6348 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[3,3,3,3]
6349 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
6350 ; SSE-NEXT: movaps 240(%rax), %xmm0
6351 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6352 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
6353 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
6354 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6355 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6356 ; SSE-NEXT: movaps %xmm1, %xmm0
6357 ; SSE-NEXT: movaps %xmm13, %xmm2
6358 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6359 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
6360 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6361 ; SSE-NEXT: movaps %xmm4, %xmm3
6362 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6363 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6364 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6365 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6366 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm4[1,3]
6367 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6368 ; SSE-NEXT: movaps %xmm4, %xmm3
6369 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
6370 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1]
6371 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm15[0,2]
6372 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6373 ; SSE-NEXT: movaps %xmm4, %xmm0
6374 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm13[2],xmm0[3],xmm13[3]
6375 ; SSE-NEXT: movaps %xmm1, %xmm3
6376 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
6377 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6378 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6379 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6380 ; SSE-NEXT: movaps %xmm1, %xmm0
6381 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
6382 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm13[0]
6383 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6384 ; SSE-NEXT: movaps %xmm2, %xmm3
6385 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6386 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6387 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
6388 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6389 ; SSE-NEXT: movaps %xmm2, %xmm0
6390 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6391 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6392 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6393 ; SSE-NEXT: movaps %xmm2, %xmm3
6394 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6395 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
6396 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
6397 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6398 ; SSE-NEXT: movaps %xmm13, %xmm3
6399 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
6400 ; SSE-NEXT: movaps %xmm2, %xmm0
6401 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6402 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6403 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6404 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6405 ; SSE-NEXT: movaps %xmm4, %xmm0
6406 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6407 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
6408 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6409 ; SSE-NEXT: movaps %xmm3, %xmm2
6410 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6411 ; SSE-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
6412 ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0]
6413 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6414 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6415 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[1,3]
6416 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6417 ; SSE-NEXT: movaps %xmm2, %xmm15
6418 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6419 ; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm3[0],xmm15[1],xmm3[1]
6420 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm0[0,2]
6421 ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6422 ; SSE-NEXT: movaps %xmm2, %xmm0
6423 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6424 ; SSE-NEXT: movaps %xmm4, %xmm2
6425 ; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
6426 ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0]
6427 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6428 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6429 ; SSE-NEXT: movaps %xmm1, %xmm0
6430 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6431 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
6432 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6433 ; SSE-NEXT: movdqa %xmm5, %xmm3
6434 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6435 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6436 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
6437 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6438 ; SSE-NEXT: movdqa %xmm5, %xmm0
6439 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6440 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6441 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6442 ; SSE-NEXT: movaps %xmm3, %xmm5
6443 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6444 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
6445 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
6446 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6447 ; SSE-NEXT: movaps %xmm2, %xmm5
6448 ; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
6449 ; SSE-NEXT: movaps %xmm3, %xmm0
6450 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6451 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
6452 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6453 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6454 ; SSE-NEXT: movdqa %xmm2, %xmm0
6455 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6456 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
6457 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6458 ; SSE-NEXT: movdqa %xmm8, %xmm3
6459 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6460 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6461 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6462 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6463 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6464 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm8[1,3]
6465 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6466 ; SSE-NEXT: movaps %xmm4, %xmm8
6467 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6468 ; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
6469 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[0,2]
6470 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6471 ; SSE-NEXT: movaps %xmm4, %xmm0
6472 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6473 ; SSE-NEXT: movdqa %xmm2, %xmm3
6474 ; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
6475 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6476 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6477 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6478 ; SSE-NEXT: movaps %xmm1, %xmm0
6479 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6480 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
6481 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6482 ; SSE-NEXT: movdqa %xmm9, %xmm3
6483 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6484 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6485 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
6486 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6487 ; SSE-NEXT: movdqa %xmm9, %xmm0
6488 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6489 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6490 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6491 ; SSE-NEXT: movaps %xmm3, %xmm8
6492 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6493 ; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
6494 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[2,0]
6495 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6496 ; SSE-NEXT: movaps %xmm2, %xmm8
6497 ; SSE-NEXT: unpckhps {{.*#+}} xmm8 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
6498 ; SSE-NEXT: movaps %xmm3, %xmm0
6499 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6500 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0]
6501 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6502 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6503 ; SSE-NEXT: movaps %xmm2, %xmm0
6504 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6505 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
6506 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6507 ; SSE-NEXT: movdqa %xmm12, %xmm3
6508 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6509 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6510 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6511 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6512 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6513 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm12[1,3]
6514 ; SSE-NEXT: movaps (%rsp), %xmm4 # 16-byte Reload
6515 ; SSE-NEXT: movaps %xmm4, %xmm5
6516 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6517 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
6518 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[0,2]
6519 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6520 ; SSE-NEXT: movaps %xmm4, %xmm0
6521 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
6522 ; SSE-NEXT: movaps %xmm2, %xmm3
6523 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
6524 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6525 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6526 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6527 ; SSE-NEXT: movaps %xmm1, %xmm0
6528 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6529 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
6530 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6531 ; SSE-NEXT: movdqa %xmm14, %xmm3
6532 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6533 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6534 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
6535 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6536 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
6537 ; SSE-NEXT: # xmm14 = xmm14[1,1],mem[0,3]
6538 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6539 ; SSE-NEXT: movaps %xmm4, %xmm5
6540 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6541 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
6542 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm14[2,0]
6543 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6544 ; SSE-NEXT: movaps %xmm2, %xmm5
6545 ; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
6546 ; SSE-NEXT: movaps %xmm4, %xmm0
6547 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6548 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
6549 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6550 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6551 ; SSE-NEXT: movaps %xmm3, %xmm0
6552 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6553 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
6554 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6555 ; SSE-NEXT: movaps %xmm1, %xmm5
6556 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6557 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6558 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
6559 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6560 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6561 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
6562 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6563 ; SSE-NEXT: movaps %xmm1, %xmm8
6564 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6565 ; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
6566 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[0,2]
6567 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6568 ; SSE-NEXT: movaps %xmm1, %xmm0
6569 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6570 ; SSE-NEXT: movaps %xmm3, %xmm1
6571 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
6572 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6573 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6574 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6575 ; SSE-NEXT: movaps %xmm2, %xmm0
6576 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6577 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
6578 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6579 ; SSE-NEXT: movaps %xmm1, %xmm5
6580 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6581 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6582 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
6583 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6584 ; SSE-NEXT: movaps %xmm1, %xmm0
6585 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6586 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6587 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6588 ; SSE-NEXT: movaps %xmm1, %xmm8
6589 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6590 ; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
6591 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[2,0]
6592 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6593 ; SSE-NEXT: movaps %xmm3, %xmm8
6594 ; SSE-NEXT: unpckhps {{.*#+}} xmm8 = xmm8[2],xmm2[2],xmm8[3],xmm2[3]
6595 ; SSE-NEXT: movaps %xmm1, %xmm0
6596 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6597 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0]
6598 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6599 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6600 ; SSE-NEXT: movaps %xmm3, %xmm0
6601 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6602 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
6603 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6604 ; SSE-NEXT: movaps %xmm1, %xmm5
6605 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6606 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6607 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
6608 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6609 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6610 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
6611 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6612 ; SSE-NEXT: movaps %xmm1, %xmm8
6613 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6614 ; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
6615 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[0,2]
6616 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6617 ; SSE-NEXT: movaps %xmm1, %xmm0
6618 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6619 ; SSE-NEXT: movaps %xmm3, %xmm1
6620 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
6621 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6622 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6623 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6624 ; SSE-NEXT: movaps %xmm2, %xmm0
6625 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6626 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
6627 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6628 ; SSE-NEXT: movaps %xmm1, %xmm5
6629 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6630 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6631 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
6632 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6633 ; SSE-NEXT: movaps %xmm1, %xmm0
6634 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6635 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6636 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6637 ; SSE-NEXT: movaps %xmm1, %xmm4
6638 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6639 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
6640 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[2,0]
6641 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6642 ; SSE-NEXT: movaps %xmm3, %xmm4
6643 ; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
6644 ; SSE-NEXT: movaps %xmm1, %xmm0
6645 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
6646 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
6647 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6648 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
6649 ; SSE-NEXT: movaps %xmm15, %xmm0
6650 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6651 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
6652 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6653 ; SSE-NEXT: movaps %xmm1, %xmm4
6654 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6655 ; SSE-NEXT: # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1]
6656 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
6657 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6658 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6659 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
6660 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6661 ; SSE-NEXT: movaps %xmm1, %xmm4
6662 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6663 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
6664 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0,2]
6665 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6666 ; SSE-NEXT: movaps %xmm1, %xmm0
6667 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
6668 ; SSE-NEXT: unpckhps {{.*#+}} xmm15 = xmm15[2],xmm2[2],xmm15[3],xmm2[3]
6669 ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm0[0]
6670 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
6671 ; SSE-NEXT: movaps %xmm14, %xmm0
6672 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6673 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
6674 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,1],xmm14[3,3]
6675 ; SSE-NEXT: movaps %xmm1, %xmm4
6676 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6677 ; SSE-NEXT: movaps %xmm1, %xmm13
6678 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm2[2],xmm13[3],xmm2[3]
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm2[0],xmm14[1],xmm2[1]
; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,1],xmm3[2,0]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, %xmm12
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: unpcklps {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1]
; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,1],xmm0[2,0]
; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm3[0]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm1[2,0]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE-NEXT: movaps 224(%rdi), %xmm8
; SSE-NEXT: movaps %xmm8, %xmm10
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: unpcklps {{.*#+}} xmm10 = xmm10[0],xmm2[0],xmm10[1],xmm2[1]
; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm0[0]
; SSE-NEXT: movaps %xmm8, %xmm0
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm0[2,0]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm8, %xmm0
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm0[0,1]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm3, %xmm9
; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm7[2,0]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm2[3,3]
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,1],xmm1[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm3[2,0]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, %xmm7
; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm0[0]
; SSE-NEXT: movaps %xmm0, %xmm4
; SSE-NEXT: movaps 240(%rdi), %xmm3
; SSE-NEXT: movaps %xmm3, %xmm5
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,0]
; SSE-NEXT: movaps %xmm3, %xmm6
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm1[0,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm6[2,0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm3, %xmm6
; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm6[0,1]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm11[2,0]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,3],xmm2[3,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,0]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = xmm4[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm4[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
; SSE-NEXT: # xmm11 = xmm11[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm11[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: # xmm6 = xmm6[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm6[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = xmm4[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm4[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: # xmm6 = xmm6[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm6[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: # xmm6 = xmm6[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm6[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = xmm4[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm4[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: # xmm6 = xmm6[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm6[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: # xmm6 = xmm6[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm6[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = xmm4[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm4[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,3],xmm0[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm11 = xmm4[0],xmm11[1,2,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = xmm4[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm4[2,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; SSE-NEXT: movss {{.*#+}} xmm6 = xmm4[0],xmm6[1,2,3]
; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,0]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,3,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movaps %xmm0, %xmm4
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movaps %xmm3, 1760(%rax)
; SSE-NEXT: movaps %xmm7, 1744(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1728(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1696(%rax)
; SSE-NEXT: movaps %xmm5, 1680(%rax)
; SSE-NEXT: movaps %xmm8, 1648(%rax)
; SSE-NEXT: movaps %xmm9, 1632(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1616(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1584(%rax)
; SSE-NEXT: movaps %xmm10, 1568(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1536(%rax)
; SSE-NEXT: movaps %xmm13, 1520(%rax)
; SSE-NEXT: movaps %xmm12, 1472(%rax)
; SSE-NEXT: movaps %xmm14, 1456(%rax)
; SSE-NEXT: movaps %xmm15, 1408(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1360(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1344(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1296(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1248(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1232(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1184(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1136(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1120(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1072(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1024(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1008(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 960(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 912(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 896(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 848(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 800(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 784(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 736(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 688(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 672(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 624(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 576(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 560(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 512(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 464(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 448(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 400(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 352(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 336(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 288(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 240(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 224(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 176(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 128(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 112(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 64(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 16(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, (%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1776(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1712(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1664(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1600(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1552(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1504(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 1488(%rax)
; SSE-NEXT: movaps %xmm4, 1440(%rax)
; SSE-NEXT: movaps %xmm6, 1424(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1392(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1376(%rax)
; SSE-NEXT: movaps %xmm11, 1328(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1312(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1280(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1264(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1216(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1200(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1168(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1152(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1104(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1088(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1056(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 1040(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 992(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 976(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 944(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 928(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 880(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 864(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 832(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 816(%rax)
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 768(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 752(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 720(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 704(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 656(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 640(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 608(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 592(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 544(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 528(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 496(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 480(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 432(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 416(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 384(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 368(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 320(%rax)
; SSE-NEXT: movaps %xmm2, 304(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 272(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 256(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 208(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 192(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 160(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 144(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%rax)
; SSE-NEXT: addq $2760, %rsp # imm = 0xAC8
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: store_i32_stride7_vf64:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: subq $3432, %rsp # imm = 0xD68
; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovaps 224(%rsi), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 224(%rdx), %ymm0
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 224(%rcx), %ymm5
; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 224(%r8), %ymm4
; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 224(%rax), %ymm3
; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm5[2],ymm0[3],ymm5[3],ymm0[6],ymm5[6],ymm0[7],ymm5[7]
; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm5
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,0],ymm5[4,5],ymm1[6,4]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm0[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps (%rax), %xmm2
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm3
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm4
; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm5
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm5[0],xmm1[0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm6
; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm7
; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm8
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX1-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm5[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1],xmm1[0,2]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm8[1],xmm7[1],zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm0
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps (%r9), %ymm8
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm8[0],ymm2[0],ymm8[2],ymm2[2]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
; AVX1-ONLY-NEXT: vmovaps (%rax), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm6
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm6[0],xmm1[0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm9
; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %xmm3
; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %xmm2
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm10
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm12
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX1-ONLY-NEXT: vmovaps 32(%r9), %xmm3
; AVX1-ONLY-NEXT: vmovaps %xmm3, (%rsp) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps 32(%r8), %xmm4
; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-ONLY-NEXT: vmovaps 32(%rax), %xmm2
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm9[1],xmm6[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm6[1,1],xmm1[0,2]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm12[1],xmm10[1],zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,1],ymm0[1,1],ymm1[5,5],ymm0[5,5]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %ymm10
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm10[1,1],ymm1[5,5],ymm10[5,5]
; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovaps 32(%r8), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 32(%r9), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
; AVX1-ONLY-NEXT: vmovaps 32(%rax), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %xmm6
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm6[0],xmm1[0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm7
; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %xmm3
; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %xmm5
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm9
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX1-ONLY-NEXT: vmovaps 64(%r9), %xmm3
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps 64(%r8), %xmm4
; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-ONLY-NEXT: vmovaps 64(%rax), %xmm2
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm6[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm6[1,1],xmm1[0,2]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm5[1],xmm9[1],zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %ymm0
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovaps 64(%r8), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 64(%r9), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
; AVX1-ONLY-NEXT: vmovaps 64(%rax), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %xmm3
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm3[0],xmm1[0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm5
; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %xmm4
; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %xmm2
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; AVX1-ONLY-NEXT: vmovaps %xmm4, %xmm7
; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm9
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX1-ONLY-NEXT: vmovaps 96(%r9), %xmm4
; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps 96(%r8), %xmm6
; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-ONLY-NEXT: vmovaps 96(%rax), %xmm2
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm4[1,1],xmm6[1,1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm3[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm3[1,1],xmm1[0,2]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm7[1],zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,1],ymm0[1,1],ymm1[5,5],ymm0[5,5]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovaps 96(%r8), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 96(%r9), %ymm14
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm14[0],ymm2[0],ymm14[2],ymm2[2]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
; AVX1-ONLY-NEXT: vmovaps 96(%rax), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovaps 128(%rsi), %xmm4
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm4[0],xmm1[0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm5
; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vmovaps 128(%rcx), %xmm3
; AVX1-ONLY-NEXT: vmovaps 128(%rdx), %xmm2
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm7
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm9
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX1-ONLY-NEXT: vmovaps 128(%r9), %xmm3
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps 128(%r8), %xmm6
; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-ONLY-NEXT: vmovaps 128(%rax), %xmm2
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm6[1,1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm4[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1],xmm1[0,2]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm7[1],zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 128(%rsi), %ymm0
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-ONLY-NEXT: vmovaps 128(%rdx), %ymm9
; AVX1-ONLY-NEXT: vmovaps 128(%rcx), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm9[1,1],ymm1[1,1],ymm9[5,5],ymm1[5,5]
; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovaps 128(%r8), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 128(%r9), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
; AVX1-ONLY-NEXT: vmovaps 128(%rax), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovaps 160(%rsi), %xmm6
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm6[0],xmm1[0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm11
; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vmovaps 160(%rcx), %xmm7
; AVX1-ONLY-NEXT: vmovaps 160(%rdx), %xmm3
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX1-ONLY-NEXT: vmovaps 160(%r9), %xmm4
; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps 160(%r8), %xmm5
; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-ONLY-NEXT: vmovaps 160(%rax), %xmm2
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm4[1,1],xmm5[1,1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm11[1],xmm6[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm6[1,1],xmm1[0,2]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm3[1],xmm7[1],zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm15
; AVX1-ONLY-NEXT: vmovaps 160(%rsi), %ymm0
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm15[1,1],ymm0[5,5],ymm15[5,5]
; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-ONLY-NEXT: vmovaps 160(%rdx), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 160(%rcx), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovaps 160(%r8), %ymm11
; AVX1-ONLY-NEXT: vmovaps 160(%r9), %ymm13
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm11[0],ymm13[2],ymm11[2]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm11[2,1],ymm1[6,4],ymm11[6,5]
; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 160(%rax), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovaps 192(%rsi), %xmm3
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm3[0],xmm1[0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm12
; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vmovaps 192(%rcx), %xmm7
; AVX1-ONLY-NEXT: vmovaps 192(%rdx), %xmm6
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX1-ONLY-NEXT: vmovaps 192(%r9), %xmm4
; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps 192(%r8), %xmm5
; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-ONLY-NEXT: vmovaps 192(%rax), %xmm2
; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm4[1,1],xmm5[1,1]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm12[1],xmm3[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm3[1,1],xmm1[0,2]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm6[1],xmm7[1],zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 192(%rsi), %ymm7
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1],ymm0[1,1],ymm7[5,5],ymm0[5,5]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-ONLY-NEXT: vmovaps 192(%rdx), %ymm1
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 192(%rcx), %ymm2
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,1],ymm2[1,1],ymm1[5,5],ymm2[5,5]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3,4],ymm2[5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovaps 192(%r8), %ymm1
; AVX1-ONLY-NEXT: vmovaps 192(%r9), %ymm2
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,0],ymm1[2,1],ymm12[6,4],ymm1[6,5]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm12[2,3]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm12, %xmm12
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm12[0],ymm0[0],ymm12[2],ymm0[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm4 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm8[1],ymm4[3],ymm8[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm8[1,1],ymm4[0,2],ymm8[5,5],ymm4[4,6]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX1-ONLY-NEXT: vmovaps 16(%rax), %xmm12
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm12[1],ymm4[2,3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5],ymm4[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm4 = ymm3[2],ymm10[2],ymm3[3],ymm10[3],ymm3[6],ymm10[6],ymm3[7],ymm10[7]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm3[1,1],ymm4[0,2],ymm3[5,5],ymm4[4,6]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX1-ONLY-NEXT: vmovaps 48(%rax), %xmm12
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm12[1],ymm4[2,3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5],ymm4[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7675 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7676 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7677 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
7678 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7679 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7680 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
7681 ; AVX1-ONLY-NEXT: # ymm4 = ymm3[2],mem[2],ymm3[3],mem[3],ymm3[6],mem[6],ymm3[7],mem[7]
7682 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
7683 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7684 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7685 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
7686 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm3[1,1],ymm4[0,2],ymm3[5,5],ymm4[4,6]
7687 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
7688 ; AVX1-ONLY-NEXT: vmovaps 80(%rax), %xmm12
7689 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm12[1],ymm4[2,3,4,5,6,7]
7690 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5],ymm4[6,7]
7691 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7692 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7693 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7694 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
7695 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7696 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
7697 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm4 # 32-byte Folded Reload
7698 ; AVX1-ONLY-NEXT: # ymm4 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
7699 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
7700 ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7701 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7702 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm14[1],ymm4[3],ymm14[3]
7703 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm14[1,1],ymm4[0,2],ymm14[5,5],ymm4[4,6]
7704 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
7705 ; AVX1-ONLY-NEXT: vmovaps 112(%rax), %xmm12
7706 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm12[1],ymm4[2,3,4,5,6,7]
7707 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5],ymm4[6,7]
7708 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7709 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7710 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7711 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
7712 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7713 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm4 # 32-byte Folded Reload
7714 ; AVX1-ONLY-NEXT: # ymm4 = ymm9[2],mem[2],ymm9[3],mem[3],ymm9[6],mem[6],ymm9[7],mem[7]
7715 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
7716 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
7717 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7718 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm3[1],ymm6[1],ymm3[3],ymm6[3]
7719 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm6[1,1],ymm4[0,2],ymm6[5,5],ymm4[4,6]
7720 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
7721 ; AVX1-ONLY-NEXT: vmovaps 144(%rax), %xmm12
7722 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm12[1],ymm4[2,3,4,5,6,7]
7723 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5],ymm4[6,7]
7724 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7725 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7726 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm15[1],ymm0[3],ymm15[3]
7727 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7728 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
7729 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm4 # 32-byte Folded Reload
7730 ; AVX1-ONLY-NEXT: # ymm4 = ymm9[2],mem[2],ymm9[3],mem[3],ymm9[6],mem[6],ymm9[7],mem[7]
7731 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
7732 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7733 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm11[1],ymm13[1],ymm11[3],ymm13[3]
7734 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm13[1,1],ymm4[0,2],ymm13[5,5],ymm4[4,6]
7735 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
7736 ; AVX1-ONLY-NEXT: vmovaps 176(%rax), %xmm12
7737 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm12[1],ymm4[2,3,4,5,6,7]
7738 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5],ymm4[6,7]
7739 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7740 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7741 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
7742 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm7[1],ymm11[1],ymm7[3],ymm11[3]
7743 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7744 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
7745 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
7746 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm4 = ymm6[2],ymm8[2],ymm6[3],ymm8[3],ymm6[6],ymm8[6],ymm6[7],ymm8[7]
7747 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
7748 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
7749 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
7750 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7751 ; AVX1-ONLY-NEXT: vmovaps 208(%rax), %xmm2
7752 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
7753 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
7754 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7755 ; AVX1-ONLY-NEXT: vmovaps 224(%rsi), %xmm3
7756 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm2
7757 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
7758 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm3[1,1],xmm1[0,2]
7759 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
7760 ; AVX1-ONLY-NEXT: vmovaps 224(%rcx), %xmm4
7761 ; AVX1-ONLY-NEXT: vmovaps 224(%rdx), %xmm12
7762 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm12[1],xmm4[1],zero
7763 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6,7]
7764 ; AVX1-ONLY-NEXT: vbroadcastss 228(%r8), %ymm14
7765 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm14[3],ymm1[4,5,6,7]
7766 ; AVX1-ONLY-NEXT: vbroadcastss 228(%r9), %ymm14
7767 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5],ymm1[6,7]
7768 ; AVX1-ONLY-NEXT: vinsertf128 $1, 224(%rax), %ymm0, %ymm0
7769 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
7770 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7771 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm4[1],xmm12[1]
7772 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm12[2,3]
7773 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7774 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,3],xmm3[3,3]
7775 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
7776 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
7777 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7778 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
7779 ; AVX1-ONLY-NEXT: vbroadcastss 232(%r9), %xmm1
7780 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
7781 ; AVX1-ONLY-NEXT: vbroadcastss 232(%rax), %ymm1
7782 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7]
7783 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7784 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm8[3,3],ymm6[3,3],ymm8[7,7],ymm6[7,7]
7785 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7786 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm11[3,3],ymm7[3,3],ymm11[7,7],ymm7[7,7]
7787 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7788 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7789 ; AVX1-ONLY-NEXT: vbroadcastss 220(%r8), %ymm1
7790 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
7791 ; AVX1-ONLY-NEXT: vbroadcastss 220(%r9), %ymm1
7792 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7793 ; AVX1-ONLY-NEXT: vbroadcastsd 216(%rax), %ymm1
7794 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
7795 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7796 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm12[0],xmm4[0],xmm12[1],xmm4[1]
7797 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
7798 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,1]
7799 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,0,1]
7800 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
7801 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
7802 ; AVX1-ONLY-NEXT: vinsertf128 $1, 224(%r8), %ymm1, %ymm1
7803 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
7804 ; AVX1-ONLY-NEXT: vbroadcastss 224(%r9), %ymm1
7805 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
7806 ; AVX1-ONLY-NEXT: vbroadcastss 224(%rax), %ymm1
7807 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
7808 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7809 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7810 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
7811 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm8[3,3],ymm3[3,3],ymm8[7,7],ymm3[7,7]
7812 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7813 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7814 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7815 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[3,3],ymm2[3,3],ymm4[7,7],ymm2[7,7]
7816 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7817 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7818 ; AVX1-ONLY-NEXT: vbroadcastss 252(%r8), %ymm1
7819 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
7820 ; AVX1-ONLY-NEXT: vbroadcastss 252(%r9), %ymm1
7821 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7822 ; AVX1-ONLY-NEXT: vbroadcastsd 248(%rax), %ymm1
7823 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
7824 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7825 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm8[0],ymm3[2],ymm8[2]
7826 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm8[3,1],ymm0[0,2],ymm8[7,5],ymm0[4,6]
7827 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[4],ymm2[4],ymm4[5],ymm2[5]
7828 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7829 ; AVX1-ONLY-NEXT: vbroadcastss 236(%r8), %ymm1
7830 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
7831 ; AVX1-ONLY-NEXT: vbroadcastss 236(%r9), %xmm1
7832 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7833 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7834 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
7835 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7836 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm3[1,1],ymm8[1,1],ymm3[5,5],ymm8[5,5]
7837 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm4[1,1],ymm2[5,5],ymm4[5,5]
7838 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7839 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7840 ; AVX1-ONLY-NEXT: vbroadcastsd 240(%r8), %ymm1
7841 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
7842 ; AVX1-ONLY-NEXT: vbroadcastss 240(%r9), %xmm1
7843 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
7844 ; AVX1-ONLY-NEXT: vbroadcastss 240(%rax), %ymm1
7845 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
7846 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7847 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7848 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7849 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
7850 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
7851 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm1 # 16-byte Folded Reload
7852 ; AVX1-ONLY-NEXT: # xmm1 = xmm8[2],mem[2],xmm8[3],mem[3]
7853 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm1, %ymm1
7854 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7855 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7856 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7857 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
7858 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
7859 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
7860 ; AVX1-ONLY-NEXT: vbroadcastsd 8(%rax), %ymm2
7861 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7862 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
7863 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7864 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
7865 ; AVX1-ONLY-NEXT: # ymm0 = ymm5[3,3],mem[3,3],ymm5[7,7],mem[7,7]
7866 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7867 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
7868 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7869 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,3],ymm6[3,3],ymm1[7,7],ymm6[7,7]
7870 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7871 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7872 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7873 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7874 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7875 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7876 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
7877 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7878 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
7879 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
7880 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7881 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7882 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7883 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
7884 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7885 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
7886 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
7887 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
7888 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7889 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7890 ; AVX1-ONLY-NEXT: vpermilps $170, (%rsp), %xmm1 # 16-byte Folded Reload
7891 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
7892 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
7893 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
7894 ; AVX1-ONLY-NEXT: vbroadcastsd 40(%rax), %ymm2
7895 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7896 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
7897 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7898 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7899 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7900 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
7901 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7902 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7903 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm1 # 32-byte Folded Reload
7904 ; AVX1-ONLY-NEXT: # ymm1 = ymm4[3,3],mem[3,3],ymm4[7,7],mem[7,7]
7905 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7906 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7907 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7908 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7909 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7910 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7911 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
7912 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7913 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
7914 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
7915 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7916 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7917 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7918 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
7919 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7920 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
7921 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
7922 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
7923 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7924 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7925 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7926 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
7927 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
7928 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
7929 ; AVX1-ONLY-NEXT: vbroadcastsd 72(%rax), %ymm2
7930 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7931 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
7932 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7933 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7934 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7935 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
7936 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7937 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7938 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7939 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7940 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7941 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7942 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7943 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7944 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7945 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7946 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
7947 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7948 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
7949 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
7950 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7951 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7952 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7953 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
7954 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7955 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
7956 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
7957 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
7958 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7959 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7960 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7961 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
7962 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
7963 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
7964 ; AVX1-ONLY-NEXT: vbroadcastsd 104(%rax), %ymm2
7965 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7966 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
7967 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7968 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7969 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,3],ymm10[3,3],ymm0[7,7],ymm10[7,7]
7970 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7971 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7972 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7973 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7974 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7975 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7976 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7977 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7978 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7979 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7980 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
7981 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7982 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
7983 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
7984 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7985 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7986 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7987 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
7988 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7989 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
7990 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
7991 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
7992 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7993 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7994 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7995 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
7996 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
7997 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
7998 ; AVX1-ONLY-NEXT: vbroadcastsd 136(%rax), %ymm2
7999 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
8000 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
8001 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8002 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
8003 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
8004 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm12[3,3],ymm15[3,3],ymm12[7,7],ymm15[7,7]
8005 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
8006 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
8007 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8008 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[3,3],ymm11[3,3],ymm2[7,7],ymm11[7,7]
8009 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
8010 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
8011 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8012 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8013 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
8014 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8015 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm3[2,3],ymm1[1,2],ymm3[6,7],ymm1[5,6]
8016 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
8017 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
8018 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
8019 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8020 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8021 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
8022 ; AVX1-ONLY-NEXT: # xmm1 = xmm0[3,3],mem[3,3]
8023 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8024 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
8025 ; AVX1-ONLY-NEXT: # xmm3 = xmm0[2],mem[2],xmm0[3],mem[3]
8026 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm3
8027 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
8028 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6],ymm3[7]
8029 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
8030 ; AVX1-ONLY-NEXT: # xmm3 = mem[2,2,2,2]
8031 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
8032 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm13[0,1,2],xmm3[3]
8033 ; AVX1-ONLY-NEXT: vbroadcastsd 168(%rax), %ymm14
8034 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm14[4,5,6,7]
8035 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2,3,4],ymm1[5,6,7]
8036 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8037 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
8038 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[3,3],ymm9[3,3],ymm10[7,7],ymm9[7,7]
8039 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
8040 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
8041 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8042 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm0[3,3],ymm9[3,3],ymm0[7,7],ymm9[7,7]
8043 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm3, %xmm3
8044 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7]
8045 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8046 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
8047 ; AVX1-ONLY-NEXT: # ymm3 = ymm3[3,3],mem[3,3],ymm3[7,7],mem[7,7]
8048 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
8049 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm5[2,3],ymm3[1,2],ymm5[6,7],ymm3[5,6]
8050 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
8051 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,2,3,1,4,6,7,5]
8052 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4],ymm3[5,6,7]
8053 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8054 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8055 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
8056 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
8057 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
8058 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm3 # 16-byte Folded Reload
8059 ; AVX1-ONLY-NEXT: # xmm3 = xmm5[2],mem[2],xmm5[3],mem[3]
8060 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
8061 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
8062 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6],ymm3[7]
8063 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8064 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm8[2,2,2,2]
8065 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
8066 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm7[0,1,2],xmm3[3]
8067 ; AVX1-ONLY-NEXT: vbroadcastsd 200(%rax), %ymm5
8068 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
8069 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4],ymm1[5,6,7]
8070 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8071 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8072 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[1],ymm6[1],ymm1[4],ymm6[4],ymm1[5],ymm6[5]
8073 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
8074 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8075 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm6[0],ymm3[2],ymm6[2]
8076 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm6[3,1],ymm3[0,2],ymm6[7,5],ymm3[4,6]
8077 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5],ymm3[6,7]
8078 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
8079 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
8080 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[3,3],mem[3,3]
8081 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
8082 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
8083 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0],ymm3[1,2,3],ymm1[4,5,6,7]
8084 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm1 # 32-byte Folded Reload
8085 ; AVX1-ONLY-NEXT: # ymm1 = ymm4[0],mem[0],ymm4[1],mem[1],ymm4[4],mem[4],ymm4[5],mem[5]
8086 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8087 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8088 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
8089 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,1],ymm3[0,2],ymm4[7,5],ymm3[4,6]
8090 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5],ymm3[6,7]
8091 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
8092 ; AVX1-ONLY-NEXT: vshufps $255, (%rsp), %xmm3, %xmm3 # 16-byte Folded Reload
8093 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[3,3],mem[3,3]
8094 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
8095 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
8096 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm1[0],ymm3[1,2,3],ymm1[4,5,6,7]
8097 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8098 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8099 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
8100 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8101 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8102 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
8103 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,1],ymm3[0,2],ymm4[7,5],ymm3[4,6]
8104 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5],ymm3[6,7]
8105 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
8106 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
8107 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[3,3],mem[3,3]
8108 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
8109 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
8110 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0],ymm3[1,2,3],ymm1[4,5,6,7]
8111 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8112 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8113 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
8114 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8115 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8116 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
8117 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,1],ymm3[0,2],ymm4[7,5],ymm3[4,6]
8118 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5],ymm3[6,7]
8119 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
8120 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
8121 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[3,3],mem[3,3]
8122 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
8123 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
8124 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3],ymm1[4,5,6,7]
8125 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm3 = ymm2[0],ymm11[0],ymm2[1],ymm11[1],ymm2[4],ymm11[4],ymm2[5],ymm11[5]
8126 ; AVX1-ONLY-NEXT: vmovaps %ymm12, %ymm2
8127 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm15[0],ymm12[0],ymm15[2],ymm12[2]
8128 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm2[3,1],ymm12[0,2],ymm2[7,5],ymm12[4,6]
8129 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm12[0,1,2,3],ymm3[4,5],ymm12[6,7]
8130 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8131 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm12 # 16-byte Folded Reload
8132 ; AVX1-ONLY-NEXT: # xmm12 = xmm2[3,3],mem[3,3]
8133 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
8134 ; AVX1-ONLY-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
8135 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm12[1,2,3],ymm3[4,5,6,7]
8136 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm12 = ymm0[0],ymm9[0],ymm0[1],ymm9[1],ymm0[4],ymm9[4],ymm0[5],ymm9[5]
8137 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8138 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm2[0],ymm10[0],ymm2[2],ymm10[2]
8139 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm10[3,1],ymm15[0,2],ymm10[7,5],ymm15[4,6]
8140 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
8141 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm12 # 16-byte Folded Reload
8142 ; AVX1-ONLY-NEXT: # xmm12 = xmm13[3,3],mem[3,3]
8143 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
8144 ; AVX1-ONLY-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
8145 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm12[1,2,3],ymm11[4,5,6,7]
8146 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8147 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
8148 ; AVX1-ONLY-NEXT: # ymm12 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8149 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8150 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8151 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm2[0],ymm0[0],ymm2[2],ymm0[2]
8152 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm0[3,1],ymm13[0,2],ymm0[7,5],ymm13[4,6]
8153 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm12[4,5],ymm10[6,7]
8154 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm7[3,3],xmm8[3,3]
8155 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
8156 ; AVX1-ONLY-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
8157 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm12[1,2,3],ymm10[4,5,6,7]
8158 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
8159 ; AVX1-ONLY-NEXT: vmovaps %ymm10, 1440(%rax)
8160 ; AVX1-ONLY-NEXT: vmovaps %ymm11, 1216(%rax)
8161 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 992(%rax)
8162 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 768(%rax)
8163 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 544(%rax)
8164 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 320(%rax)
8165 ; AVX1-ONLY-NEXT: vmovaps %ymm14, 96(%rax)
8166 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8167 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1504(%rax)
8168 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8169 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1408(%rax)
8170 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8171 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1312(%rax)
8172 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8173 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1280(%rax)
8174 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8175 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1184(%rax)
8176 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8177 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1088(%rax)
8178 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8179 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1056(%rax)
8180 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8181 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 960(%rax)
8182 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8183 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 864(%rax)
8184 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8185 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 832(%rax)
8186 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8187 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 736(%rax)
8188 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8189 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 640(%rax)
8190 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8191 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 608(%rax)
8192 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8193 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 512(%rax)
8194 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8195 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rax)
8196 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8197 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rax)
8198 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8199 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rax)
8200 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8201 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rax)
8202 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8203 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rax)
8204 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8205 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rax)
8206 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8207 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1696(%rax)
8208 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8209 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1664(%rax)
8210 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8211 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1472(%rax)
8212 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8213 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1376(%rax)
8214 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8215 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1344(%rax)
8216 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8217 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1248(%rax)
8218 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8219 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1152(%rax)
8220 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8221 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1120(%rax)
8222 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8223 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1024(%rax)
8224 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8225 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 928(%rax)
8226 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8227 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 896(%rax)
8228 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8229 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 800(%rax)
8230 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8231 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 704(%rax)
8232 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8233 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 672(%rax)
8234 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8235 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 576(%rax)
8236 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8237 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rax)
8238 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8239 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rax)
8240 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8241 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rax)
8242 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8243 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rax)
8244 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8245 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
8246 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8247 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
8248 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8249 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
8250 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8251 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
8252 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8253 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1760(%rax)
8254 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8255 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1728(%rax)
8256 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8257 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1568(%rax)
8258 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8259 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1536(%rax)
8260 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8261 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1632(%rax)
8262 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8263 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1600(%rax)
8264 ; AVX1-ONLY-NEXT: addq $3432, %rsp # imm = 0xD68
8265 ; AVX1-ONLY-NEXT: vzeroupper
8266 ; AVX1-ONLY-NEXT: retq
8268 ; AVX2-SLOW-LABEL: store_i32_stride7_vf64:
8269 ; AVX2-SLOW: # %bb.0:
8270 ; AVX2-SLOW-NEXT: subq $2968, %rsp # imm = 0xB98
8271 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
8272 ; AVX2-SLOW-NEXT: vmovaps (%rax), %xmm0
8273 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8274 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
8275 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm13
8276 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %xmm4
8277 ; AVX2-SLOW-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8278 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm1
8279 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8280 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %xmm5
8281 ; AVX2-SLOW-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8282 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
8283 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm13[1],xmm1[2,3]
8284 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
8285 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
8286 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm10
8287 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %xmm3
8288 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8289 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm9
8290 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm9[1],xmm10[1],zero
8291 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm7
8292 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %xmm8
8293 ; AVX2-SLOW-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8294 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %xmm6
8295 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %xmm11
8296 ; AVX2-SLOW-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8297 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm6[1,1,2,2]
8298 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm7[2],xmm2[3]
8299 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
8300 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
8301 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8302 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8303 ; AVX2-SLOW-NEXT: vmovaps 32(%rax), %xmm0
8304 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8305 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
8306 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1,1,1]
8307 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
8308 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
8309 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
8310 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm11[1,1,2,2]
8311 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm8[2],xmm1[3]
8312 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8313 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %xmm8
8314 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm8[1],xmm3[1],zero
8315 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
8316 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8317 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8318 ; AVX2-SLOW-NEXT: vmovaps 64(%r8), %xmm1
8319 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8320 ; AVX2-SLOW-NEXT: vmovaps 64(%r9), %xmm0
8321 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8322 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
8323 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
8324 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
8325 ; AVX2-SLOW-NEXT: vmovaps 64(%rax), %xmm1
8326 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8327 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
8328 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
8329 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %xmm2
8330 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8331 ; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %xmm1
8332 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8333 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
8334 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
8335 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8336 ; AVX2-SLOW-NEXT: vmovaps 64(%rcx), %xmm3
8337 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8338 ; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %xmm2
8339 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8340 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
8341 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
8342 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8343 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8344 ; AVX2-SLOW-NEXT: vmovaps 96(%r8), %xmm1
8345 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8346 ; AVX2-SLOW-NEXT: vmovaps 96(%r9), %xmm0
8347 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8348 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
8349 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
8350 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
8351 ; AVX2-SLOW-NEXT: vmovaps 96(%rax), %xmm1
8352 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8353 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
8354 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
8355 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %xmm2
8356 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8357 ; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %xmm1
8358 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8359 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
8360 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
8361 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8362 ; AVX2-SLOW-NEXT: vmovaps 96(%rcx), %xmm3
8363 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %xmm2
; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 128(%r8), %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 128(%r9), %xmm0
; AVX2-SLOW-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-SLOW-NEXT: vmovaps 128(%rax), %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %xmm2
; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 128(%rsi), %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-SLOW-NEXT: vmovaps 128(%rcx), %xmm3
; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 128(%rdx), %xmm2
; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 160(%r8), %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 160(%r9), %xmm0
; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-SLOW-NEXT: vmovaps 160(%rax), %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %xmm2
; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 160(%rsi), %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-SLOW-NEXT: vmovaps 160(%rcx), %xmm3
; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 160(%rdx), %xmm2
; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 192(%r9), %xmm0
; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 192(%r8), %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-SLOW-NEXT: vmovaps 192(%rax), %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %xmm2
; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 192(%rsi), %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-SLOW-NEXT: vmovaps 192(%rcx), %xmm3
; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vmovaps 192(%rdx), %xmm2
; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps (%rcx), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps (%r9), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vmovaps 16(%rax), %xmm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps 32(%r8), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 32(%r9), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vmovaps 48(%rax), %xmm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %ymm0
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 64(%rcx), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps 64(%r8), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 64(%r9), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vmovaps 80(%rax), %xmm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %ymm0
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 96(%rcx), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps 96(%r8), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 96(%r9), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vmovaps 112(%rax), %xmm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 128(%rsi), %ymm0
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovaps 128(%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 128(%rcx), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps 128(%r8), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 128(%r9), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vmovaps 144(%rax), %xmm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 160(%rsi), %ymm0
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovaps 160(%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 160(%rcx), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps 160(%r8), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 160(%r9), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vmovaps 176(%rax), %xmm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 192(%rsi), %ymm0
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovaps 192(%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 192(%rcx), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps 192(%r8), %ymm2
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 192(%r9), %ymm1
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vmovaps 208(%rax), %xmm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 224(%rcx), %xmm0
; AVX2-SLOW-NEXT: vbroadcastss %xmm0, %xmm2
; AVX2-SLOW-NEXT: vmovaps 224(%rdx), %xmm1
; AVX2-SLOW-NEXT: vbroadcastss %xmm1, %xmm3
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; AVX2-SLOW-NEXT: vmovaps 224(%rsi), %xmm4
; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %xmm5
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
; AVX2-SLOW-NEXT: vbroadcastsd 224(%r8), %ymm3
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
; AVX2-SLOW-NEXT: vmovaps 224(%r9), %xmm3
; AVX2-SLOW-NEXT: vbroadcastss %xmm3, %ymm15
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm15[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vbroadcastss 224(%rax), %ymm15
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm15[6],ymm2[7]
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm4[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm15 = zero,xmm1[1],xmm0[1],zero
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm15[1,2],ymm2[3,4,5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss 228(%r8), %ymm14
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm14[3],ymm2[4,5,6,7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm14 = xmm3[1,1,1,1]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5],ymm2[6,7]
; AVX2-SLOW-NEXT: vinsertf128 $1, 224(%rax), %ymm15, %ymm14
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm14[1],ymm2[2,3,4],ymm14[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm5[3,3],xmm4[3,3]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vmovaps 224(%r8), %ymm5
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5,6,7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm3[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss 232(%rax), %ymm1
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %ymm11
; AVX2-SLOW-NEXT: vmovaps 224(%rsi), %ymm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm4[1,1,1,1,5,5,5,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm11[1],ymm0[2,3,4],ymm11[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovaps 224(%rdx), %ymm12
; AVX2-SLOW-NEXT: vmovaps 224(%rcx), %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm12[1,1],ymm2[1,1],ymm12[5,5],ymm2[5,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5,6],ymm14[7]
; AVX2-SLOW-NEXT: vbroadcastsd 240(%r8), %ymm15
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1,2,3,4,5,6],ymm15[7]
; AVX2-SLOW-NEXT: vbroadcastss 240(%r9), %xmm15
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss 240(%rax), %ymm15
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm15[2],ymm14[3,4,5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vbroadcastss %xmm10, %xmm14
; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm15
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm15 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps %xmm13, %xmm1
; AVX2-SLOW-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm15 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm15[0],ymm13[0],ymm15[2],ymm13[2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6],ymm14[7]
; AVX2-SLOW-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm7[3,3],xmm6[3,3]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm7 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm0[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm7 = xmm1[0,1,2],xmm7[3]
; AVX2-SLOW-NEXT: vbroadcastsd 8(%rax), %ymm9
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm1, %xmm6
; AVX2-SLOW-NEXT: vbroadcastss %xmm8, %xmm7
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm14[3,3],xmm15[3,3]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm7 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm7 = xmm10[0,1,2],xmm7[3]
; AVX2-SLOW-NEXT: vbroadcastsd 40(%rax), %ymm8
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm15, %xmm6
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm10, %xmm7
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm9[3,3],xmm1[3,3]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm7 = xmm10[2],xmm15[2],xmm10[3],xmm15[3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm7 = xmm14[0,1,2],xmm7[3]
; AVX2-SLOW-NEXT: vbroadcastsd 72(%rax), %ymm8
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm15, %xmm6
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm14, %xmm7
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm10[3,3],xmm9[3,3]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm7 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm7 = xmm0[0,1,2],xmm7[3]
; AVX2-SLOW-NEXT: vbroadcastsd 104(%rax), %ymm8
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm15, %xmm6
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm14, %xmm7
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm13 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm10[3,3],xmm9[3,3]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm7 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm7 = xmm0[0,1,2],xmm7[3]
; AVX2-SLOW-NEXT: vbroadcastsd 136(%rax), %ymm8
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm14, %xmm6
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm10, %xmm7
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
; AVX2-SLOW-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm9[3,3],xmm1[3,3]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm7 = xmm10[2],xmm14[2],xmm10[3],xmm14[3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm7 = xmm0[0,1,2],xmm7[3]
; AVX2-SLOW-NEXT: vbroadcastsd 168(%rax), %ymm8
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm14, %xmm6
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm7
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm7 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
; AVX2-SLOW-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm10[3,3],xmm9[3,3]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm7 = xmm13[2],xmm14[2],xmm13[3],xmm14[3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm7 = xmm0[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm7 = xmm1[0,1,2],xmm7[3]
; AVX2-SLOW-NEXT: vbroadcastsd 200(%rax), %ymm8
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm6 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm6 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[3,3,3,3]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm7 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss 220(%r8), %ymm7
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
; AVX2-SLOW-NEXT: vbroadcastss 220(%r9), %ymm7
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
; AVX2-SLOW-NEXT: vbroadcastsd 216(%rax), %ymm7
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0],ymm6[1,2,3,4,5,6],ymm7[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vbroadcastss 240(%rdx), %ymm6
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm2[3,1,2,0,7,5,6,4]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6],ymm7[7]
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm7 = ymm11[0],ymm4[0],ymm11[1],ymm4[1],ymm11[4],ymm4[4],ymm11[5],ymm4[5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
; AVX2-SLOW-NEXT: vbroadcastss 236(%r8), %ymm7
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3],ymm6[4,5,6,7]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm6 = ymm11[2],ymm4[2],ymm11[3],ymm4[3],ymm11[6],ymm4[6],ymm11[7],ymm4[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,2,2,2]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm7 = ymm12[2],ymm2[2],ymm12[3],ymm2[3],ymm12[6],ymm2[6],ymm12[7],ymm2[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm6 = mem[1,2,2,3,5,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2,3,4,5,6],ymm6[7]
; AVX2-SLOW-NEXT: vmovaps 224(%rax), %ymm6
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm6[3],ymm3[4,5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm6[2,3],ymm7[2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0],ymm3[1],ymm5[2,3,4],ymm3[5],ymm5[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm12[2],ymm2[3],ymm12[3],ymm2[6],ymm12[6],ymm2[7],ymm12[7]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm11[2],ymm4[3],ymm11[3],ymm4[6],ymm11[6],ymm4[7],ymm11[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss 252(%r8), %ymm1
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vbroadcastss 252(%r9), %ymm1
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vbroadcastsd 248(%rax), %ymm1
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1,1,1,5,5,5,5]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4],ymm5[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,1],ymm6[1,1],ymm12[5,5],ymm6[5,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm14[0,0,0,0,4,4,4,4]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm13[0,1,0,1,4,5,4,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vbroadcastsd 16(%rax), %ymm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,1,1,1,5,5,5,5]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2,3,4],ymm4[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm11[1,1],ymm1[5,5],ymm11[5,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vbroadcastsd 48(%rax), %ymm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vbroadcastsd 80(%rax), %ymm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vbroadcastsd 112(%rax), %ymm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vbroadcastsd 144(%rax), %ymm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vbroadcastsd 176(%rax), %ymm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm10[1,1],ymm0[5,5],ymm10[5,5]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,1,1,1,5,5,5,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vbroadcastsd 208(%rax), %ymm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-SLOW-NEXT: vbroadcastss 16(%rdx), %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm6[3,1,2,0,7,5,6,4]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm1 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm6[2],ymm12[2],ymm6[3],ymm12[3],ymm6[6],ymm12[6],ymm6[7],ymm12[7]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[6],ymm5[6],ymm7[7],ymm5[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm13[3,3],ymm14[3,3],ymm13[7,7],ymm14[7,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss 48(%rdx), %ymm0
; AVX2-SLOW-NEXT: vmovaps %ymm11, %ymm6
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm11[3,1,2,0,7,5,6,4]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm1 = ymm4[0],ymm15[0],ymm4[1],ymm15[1],ymm4[4],ymm15[4],ymm4[5],ymm15[5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = ymm6[2],mem[2],ymm6[3],mem[3],ymm6[6],mem[6],ymm6[7],mem[7]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm15[2],ymm4[2],ymm15[3],ymm4[3],ymm15[6],ymm4[6],ymm15[7],ymm4[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vbroadcastss 80(%rdx), %ymm0
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm6[3,1,2,0,7,5,6,4]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm1 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[4],ymm2[4],ymm4[5],ymm2[5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = ymm6[2],mem[2],ymm6[3],mem[3],ymm6[6],mem[6],ymm6[7],mem[7]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss 112(%rdx), %ymm0
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm4[3,1,2,0,7,5,6,4]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm0 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss 144(%rdx), %ymm0
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm3[3,1,2,0,7,5,6,4]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm1 = ymm15[0],ymm2[0],ymm15[1],ymm2[1],ymm15[4],ymm2[4],ymm15[5],ymm2[5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, (%rsp), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = ymm3[2],mem[2],ymm3[3],mem[3],ymm3[6],mem[6],ymm3[7],mem[7]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm15[2],ymm2[3],ymm15[3],ymm2[6],ymm15[6],ymm2[7],ymm15[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm15 = mem[2,3,2,3,6,7,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0],ymm1[1,2],ymm15[3,4],ymm1[5,6],ymm15[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss 176(%rdx), %ymm0
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm1[3,1,2,0,7,5,6,4]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1,2,3,4,5],ymm0[6],ymm15[7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm15 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5],ymm2[6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm15 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm15 = xmm7[3,3],mem[3,3]
; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm15 = xmm15[0,1,2],mem[3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm15[1,2,3],ymm2[4,5,6,7]
; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm15 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm14[2],ymm0[3],ymm14[3],ymm0[6],ymm14[6],ymm0[7],ymm14[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[3,3,3,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm15 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm14 = mem[2,3,2,3,6,7,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1,2],ymm14[3,4],ymm15[5,6],ymm14[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[2,1,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0],ymm0[1,2,3,4],ymm14[5,6,7]
; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm14 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
; AVX2-SLOW-NEXT: vpermilps $39, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
; AVX2-SLOW-NEXT: # ymm15 = mem[3,1,2,0,7,5,6,4]
; AVX2-SLOW-NEXT: vbroadcastss 208(%rdx), %ymm13
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5],ymm13[6],ymm15[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5],ymm13[6,7]
; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm14 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm14 = xmm1[3,3],mem[3,3]
; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload
; AVX2-SLOW-NEXT: # xmm14 = xmm14[0,1,2],mem[3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1,2,3],ymm13[4,5,6,7]
; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-SLOW-NEXT: vmovaps %ymm13, 1440(%rax)
9234 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1312(%rax)
9235 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 1216(%rax)
9236 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 1088(%rax)
9237 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 992(%rax)
9238 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 864(%rax)
9239 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 768(%rax)
9240 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 640(%rax)
9241 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9242 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 544(%rax)
9243 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9244 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 416(%rax)
9245 ; AVX2-SLOW-NEXT: vmovaps %ymm11, 320(%rax)
9246 ; AVX2-SLOW-NEXT: vmovaps %ymm12, 192(%rax)
9247 ; AVX2-SLOW-NEXT: vmovaps %ymm10, 96(%rax)
9248 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9249 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1504(%rax)
9250 ; AVX2-SLOW-NEXT: vmovaps %ymm9, 1472(%rax)
9251 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9252 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1280(%rax)
9253 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9254 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1248(%rax)
9255 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9256 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1056(%rax)
9257 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9258 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1024(%rax)
9259 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9260 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 832(%rax)
9261 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9262 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 800(%rax)
9263 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9264 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 608(%rax)
9265 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9266 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 576(%rax)
9267 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9268 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 384(%rax)
9269 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9270 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 352(%rax)
9271 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9272 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rax)
9273 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9274 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rax)
9275 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9276 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1760(%rax)
9277 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9278 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1728(%rax)
9279 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9280 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1664(%rax)
9281 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9282 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1536(%rax)
9283 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9284 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1408(%rax)
9285 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9286 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1376(%rax)
9287 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9288 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1344(%rax)
9289 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9290 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1184(%rax)
9291 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9292 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1152(%rax)
9293 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9294 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1120(%rax)
9295 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9296 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 960(%rax)
9297 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9298 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 928(%rax)
9299 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9300 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 896(%rax)
9301 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9302 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 736(%rax)
9303 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9304 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 704(%rax)
9305 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9306 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 672(%rax)
9307 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9308 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 512(%rax)
9309 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9310 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 480(%rax)
9311 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9312 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 448(%rax)
9313 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9314 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 288(%rax)
9315 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9316 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rax)
9317 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9318 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax)
9319 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9320 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rax)
9321 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9322 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
9323 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9324 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
9325 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9326 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1696(%rax)
9327 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9328 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1632(%rax)
9329 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9330 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1600(%rax)
9331 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9332 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1568(%rax)
9333 ; AVX2-SLOW-NEXT: addq $2968, %rsp # imm = 0xB98
9334 ; AVX2-SLOW-NEXT: vzeroupper
9335 ; AVX2-SLOW-NEXT: retq
9337 ; AVX2-FAST-LABEL: store_i32_stride7_vf64:
9338 ; AVX2-FAST: # %bb.0:
9339 ; AVX2-FAST-NEXT: subq $3080, %rsp # imm = 0xC08
9340 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
9341 ; AVX2-FAST-NEXT: vmovaps (%rax), %xmm0
9342 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9343 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9344 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm2
9345 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9346 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %xmm3
9347 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9348 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm1
9349 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9350 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %xmm4
9351 ; AVX2-FAST-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9352 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
9353 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
9354 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
9355 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
9356 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm2
9357 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9358 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %xmm5
9359 ; AVX2-FAST-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9360 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm1
9361 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9362 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1],xmm2[1],zero
9363 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm8
9364 ; AVX2-FAST-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9365 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %xmm6
9366 ; AVX2-FAST-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9367 ; AVX2-FAST-NEXT: vmovaps (%rsi), %xmm2
9368 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9369 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %xmm7
9370 ; AVX2-FAST-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9371 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,1,2,2]
9372 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm8[2],xmm2[3]
9373 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
9374 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
9375 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9376 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9377 ; AVX2-FAST-NEXT: vmovaps 32(%rax), %xmm0
9378 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9379 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9380 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1,1,1]
9381 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
9382 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
9383 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
9384 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm7[1,1,2,2]
9385 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm6[2],xmm1[3]
9386 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9387 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %xmm2
9388 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9389 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm5[1],zero
9390 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9391 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9392 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9393 ; AVX2-FAST-NEXT: vmovaps 64(%r8), %xmm1
9394 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9395 ; AVX2-FAST-NEXT: vmovaps 64(%r9), %xmm0
9396 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9397 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9398 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9399 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9400 ; AVX2-FAST-NEXT: vmovaps 64(%rax), %xmm1
9401 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9402 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9403 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9404 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %xmm2
9405 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9406 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %xmm1
9407 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9408 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9409 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9410 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9411 ; AVX2-FAST-NEXT: vmovaps 64(%rcx), %xmm3
9412 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9413 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %xmm2
9414 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9415 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9416 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9417 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9418 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9419 ; AVX2-FAST-NEXT: vmovaps 96(%r8), %xmm1
9420 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9421 ; AVX2-FAST-NEXT: vmovaps 96(%r9), %xmm0
9422 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9423 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9424 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9425 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9426 ; AVX2-FAST-NEXT: vmovaps 96(%rax), %xmm1
9427 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9428 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9429 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9430 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %xmm2
9431 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9432 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %xmm1
9433 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9434 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9435 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9436 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9437 ; AVX2-FAST-NEXT: vmovaps 96(%rcx), %xmm3
9438 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9439 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %xmm2
9440 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9441 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9442 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9443 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9444 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9445 ; AVX2-FAST-NEXT: vmovaps 128(%r8), %xmm1
9446 ; AVX2-FAST-NEXT: vmovaps %xmm1, (%rsp) # 16-byte Spill
9447 ; AVX2-FAST-NEXT: vmovaps 128(%r9), %xmm0
9448 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9449 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9450 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9451 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9452 ; AVX2-FAST-NEXT: vmovaps 128(%rax), %xmm1
9453 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9454 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9455 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9456 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %xmm2
9457 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9458 ; AVX2-FAST-NEXT: vmovaps 128(%rsi), %xmm1
9459 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9460 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9461 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9462 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9463 ; AVX2-FAST-NEXT: vmovaps 128(%rcx), %xmm3
9464 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9465 ; AVX2-FAST-NEXT: vmovaps 128(%rdx), %xmm2
9466 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9467 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9468 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9469 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9470 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9471 ; AVX2-FAST-NEXT: vmovaps 160(%r8), %xmm1
9472 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9473 ; AVX2-FAST-NEXT: vmovaps 160(%r9), %xmm0
9474 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9475 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9476 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9477 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9478 ; AVX2-FAST-NEXT: vmovaps 160(%rax), %xmm1
9479 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9480 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9481 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9482 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %xmm2
9483 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9484 ; AVX2-FAST-NEXT: vmovaps 160(%rsi), %xmm1
9485 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9486 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9487 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9488 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9489 ; AVX2-FAST-NEXT: vmovaps 160(%rcx), %xmm3
9490 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9491 ; AVX2-FAST-NEXT: vmovaps 160(%rdx), %xmm2
9492 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9493 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9494 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9495 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9496 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9497 ; AVX2-FAST-NEXT: vmovaps 192(%r9), %xmm0
9498 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9499 ; AVX2-FAST-NEXT: vmovaps 192(%r8), %xmm1
9500 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9501 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9502 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9503 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9504 ; AVX2-FAST-NEXT: vmovaps 192(%rax), %xmm1
9505 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9506 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9507 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9508 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %xmm2
9509 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9510 ; AVX2-FAST-NEXT: vmovaps 192(%rsi), %xmm1
9511 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9512 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9513 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9514 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9515 ; AVX2-FAST-NEXT: vmovaps 192(%rcx), %xmm3
9516 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9517 ; AVX2-FAST-NEXT: vmovaps 192(%rdx), %xmm2
9518 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9519 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9520 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9521 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9522 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9523 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
9524 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9525 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm1
9526 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9527 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
9528 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9529 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm2
9530 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9531 ; AVX2-FAST-NEXT: vmovaps (%rcx), %ymm1
9532 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9533 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
9534 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9535 ; AVX2-FAST-NEXT: vmovaps (%r8), %ymm15
9536 ; AVX2-FAST-NEXT: vmovaps (%r9), %ymm13
9537 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm13[1,1,2,2,5,5,6,6]
9538 ; AVX2-FAST-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9539 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3,4,5],ymm15[6],ymm1[7]
9540 ; AVX2-FAST-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9541 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9542 ; AVX2-FAST-NEXT: vmovaps 16(%rax), %xmm2
9543 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9544 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9545 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9546 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm0
9547 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9548 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %ymm1
9549 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9550 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
9551 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9552 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm1
9553 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9554 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %ymm2
9555 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9556 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9557 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9558 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %ymm2
9559 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9560 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %ymm1
9561 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9562 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9563 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9564 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9565 ; AVX2-FAST-NEXT: vmovaps 48(%rax), %xmm2
9566 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9567 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9568 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9569 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm1
9570 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9571 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %ymm0
9572 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9573 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
9574 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9575 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %ymm1
9576 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9577 ; AVX2-FAST-NEXT: vmovaps 64(%rcx), %ymm2
9578 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9579 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9580 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9581 ; AVX2-FAST-NEXT: vmovaps 64(%r8), %ymm2
9582 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9583 ; AVX2-FAST-NEXT: vmovaps 64(%r9), %ymm1
9584 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9585 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9586 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9587 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9588 ; AVX2-FAST-NEXT: vmovaps 80(%rax), %xmm2
9589 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9590 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9591 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9592 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm0
9593 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9594 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %ymm1
9595 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9596 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
9597 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9598 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %ymm1
9599 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9600 ; AVX2-FAST-NEXT: vmovaps 96(%rcx), %ymm2
9601 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9602 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9603 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9604 ; AVX2-FAST-NEXT: vmovaps 96(%r8), %ymm2
9605 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9606 ; AVX2-FAST-NEXT: vmovaps 96(%r9), %ymm1
9607 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9608 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9609 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9610 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9611 ; AVX2-FAST-NEXT: vmovaps 112(%rax), %xmm2
9612 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9613 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9614 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9615 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm0
9616 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9617 ; AVX2-FAST-NEXT: vmovaps 128(%rsi), %ymm1
9618 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9619 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
9620 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9621 ; AVX2-FAST-NEXT: vmovaps 128(%rdx), %ymm1
9622 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9623 ; AVX2-FAST-NEXT: vmovaps 128(%rcx), %ymm2
9624 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9625 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9626 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9627 ; AVX2-FAST-NEXT: vmovaps 128(%r8), %ymm2
9628 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9629 ; AVX2-FAST-NEXT: vmovaps 128(%r9), %ymm1
9630 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9631 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9632 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9633 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9634 ; AVX2-FAST-NEXT: vmovaps 144(%rax), %xmm2
9635 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9636 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9637 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9638 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm0
9639 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9640 ; AVX2-FAST-NEXT: vmovaps 160(%rsi), %ymm1
9641 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9642 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
9643 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9644 ; AVX2-FAST-NEXT: vmovaps 160(%rdx), %ymm1
9645 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9646 ; AVX2-FAST-NEXT: vmovaps 160(%rcx), %ymm2
9647 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9648 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9649 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9650 ; AVX2-FAST-NEXT: vmovaps 160(%r8), %ymm2
9651 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9652 ; AVX2-FAST-NEXT: vmovaps 160(%r9), %ymm1
9653 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9654 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9655 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9656 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9657 ; AVX2-FAST-NEXT: vmovaps 176(%rax), %xmm2
9658 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9659 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9660 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9661 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %ymm1
9662 ; AVX2-FAST-NEXT: vmovaps 192(%rsi), %ymm12
9663 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm12[2],ymm1[3],ymm12[3],ymm1[6],ymm12[6],ymm1[7],ymm12[7]
9664 ; AVX2-FAST-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9665 ; AVX2-FAST-NEXT: vmovaps %ymm1, %ymm10
9666 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9667 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9668 ; AVX2-FAST-NEXT: vmovaps 192(%rdx), %ymm7
9669 ; AVX2-FAST-NEXT: vmovaps 192(%rcx), %ymm8
9670 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7]
9671 ; AVX2-FAST-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9672 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9673 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9674 ; AVX2-FAST-NEXT: vmovaps 192(%r8), %ymm2
9675 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9676 ; AVX2-FAST-NEXT: vmovaps 192(%r9), %ymm1
9677 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9678 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9679 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9680 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9681 ; AVX2-FAST-NEXT: vmovaps 208(%rax), %xmm2
9682 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9683 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9684 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9685 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %xmm0
9686 ; AVX2-FAST-NEXT: vmovaps 224(%rsi), %xmm1
9687 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm1[1,1,2,2]
9688 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2],xmm2[3]
9689 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
9690 ; AVX2-FAST-NEXT: vmovaps 224(%rcx), %xmm3
9691 ; AVX2-FAST-NEXT: vmovaps 224(%rdx), %xmm6
9692 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm6[1],xmm3[1],zero
9693 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm5[1,2],ymm2[3,4,5,6,7]
9694 ; AVX2-FAST-NEXT: vbroadcastss 228(%r8), %ymm4
9695 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5,6,7]
9696 ; AVX2-FAST-NEXT: vmovaps 224(%r9), %xmm4
9697 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm11 = xmm4[1,1,1,1]
9698 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
9699 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm11[4,5],ymm2[6,7]
9700 ; AVX2-FAST-NEXT: vinsertf128 $1, 224(%rax), %ymm5, %ymm5
9701 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3,4],ymm5[5],ymm2[6,7]
9702 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9703 ; AVX2-FAST-NEXT: vbroadcastss %xmm3, %xmm2
9704 ; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm5
9705 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
9706 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm11 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
9707 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [0,1,2,2,0,1,2,2]
9708 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,0,1]
9709 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm9, %ymm11
9710 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm11[0,1],ymm2[2,3],ymm11[4,5,6,7]
9711 ; AVX2-FAST-NEXT: vbroadcastsd 224(%r8), %ymm11
9712 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm11[4,5],ymm2[6,7]
9713 ; AVX2-FAST-NEXT: vbroadcastss %xmm4, %ymm11
9714 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm11[5],ymm2[6,7]
9715 ; AVX2-FAST-NEXT: vbroadcastss 224(%rax), %ymm11
9716 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm11[6],ymm2[7]
9717 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9718 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
9719 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
9720 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9721 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9722 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9723 ; AVX2-FAST-NEXT: vmovaps 224(%r8), %ymm6
9724 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5,6,7]
9725 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[2,2,2,2]
9726 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
9727 ; AVX2-FAST-NEXT: vbroadcastss 232(%rax), %ymm1
9728 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7]
9729 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9730 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %ymm2
9731 ; AVX2-FAST-NEXT: vmovaps 224(%rsi), %ymm1
9732 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,1,1,1,5,5,5,5]
9733 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
9734 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm0[2,2,2,2]
9735 ; AVX2-FAST-NEXT: vmovaps 224(%rdx), %ymm3
9736 ; AVX2-FAST-NEXT: vmovaps 224(%rcx), %ymm0
9737 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm14 = ymm3[1,1],ymm0[1,1],ymm3[5,5],ymm0[5,5]
9738 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm14[5,6],ymm11[7]
9739 ; AVX2-FAST-NEXT: vbroadcastsd 240(%r8), %ymm14
9740 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm14[0],ymm11[1,2,3,4,5,6],ymm14[7]
9741 ; AVX2-FAST-NEXT: vbroadcastss 240(%r9), %xmm14
9742 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm14[1],ymm11[2,3,4,5,6,7]
9743 ; AVX2-FAST-NEXT: vbroadcastss 240(%rax), %ymm14
9744 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm14[2],ymm11[3,4,5,6,7]
9745 ; AVX2-FAST-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9746 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm11 = ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[6],ymm7[6],ymm8[7],ymm7[7]
9747 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[3,3,3,3]
9748 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm14 = ymm12[2],ymm10[2],ymm12[3],ymm10[3],ymm12[6],ymm10[6],ymm12[7],ymm10[7]
9749 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[3,3,3,3]
9750 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm14[0,1,2],ymm11[3,4,5,6,7]
9751 ; AVX2-FAST-NEXT: vbroadcastss 220(%r8), %ymm14
9752 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm14[5],ymm11[6,7]
9753 ; AVX2-FAST-NEXT: vbroadcastss 220(%r9), %ymm14
9754 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm14[6,7]
9755 ; AVX2-FAST-NEXT: vbroadcastsd 216(%rax), %ymm14
9756 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0],ymm11[1,2,3,4,5,6],ymm14[7]
9757 ; AVX2-FAST-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9758 ; AVX2-FAST-NEXT: vbroadcastss 240(%rdx), %ymm11
9759 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm14 = ymm0[3,1,2,0,7,5,6,4]
9760 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm14[0,1,2,3,4,5],ymm11[6],ymm14[7]
9761 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm14 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
9762 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm14[4,5],ymm11[6,7]
9763 ; AVX2-FAST-NEXT: vbroadcastss 236(%r8), %ymm14
9764 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm14[1],ymm11[2,3,4,5,6,7]
9765 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,2,3,3]
9766 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1],ymm4[2,3],ymm11[4,5,6,7]
9767 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm11 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
9768 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,2]
9769 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm14 = ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[6],ymm0[6],ymm3[7],ymm0[7]
9770 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm14[4,5,6,7]
9771 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm11[0,1,2,3,4,5],ymm6[6,7]
9772 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm11 = [5,6,5,6,5,6,5,6]
9773 ; AVX2-FAST-NEXT: vpermps 224(%r9), %ymm11, %ymm11
9774 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm11[0],ymm6[1,2,3,4,5,6],ymm11[7]
9775 ; AVX2-FAST-NEXT: vmovaps 224(%rax), %ymm11
9776 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm11[3],ymm4[4,5,6,7]
9777 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9778 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm11[2,3],ymm14[2,3]
9779 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3,4],ymm4[5],ymm6[6,7]
9780 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9781 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7]
9782 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9783 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
9784 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
9785 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9786 ; AVX2-FAST-NEXT: vbroadcastss 252(%r8), %ymm1
9787 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9788 ; AVX2-FAST-NEXT: vbroadcastss 252(%r9), %ymm1
9789 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
9790 ; AVX2-FAST-NEXT: vbroadcastsd 248(%rax), %ymm1
9791 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
9792 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9793 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9794 ; AVX2-FAST-NEXT: vbroadcastss %xmm5, %xmm0
9795 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
9796 ; AVX2-FAST-NEXT: vbroadcastss %xmm4, %xmm1
9797 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9798 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9799 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9800 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
9801 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9802 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
9803 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9804 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9805 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
9806 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9807 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
9808 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
9809 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
9810 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9811 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm7[3,3],xmm6[3,3]
9812 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
9813 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9814 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9815 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9816 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm8[2,2,2,2]
9817 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
9818 ; AVX2-FAST-NEXT: vbroadcastsd 8(%rax), %ymm2
9819 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
9820 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
9821 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9822 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9823 ; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9824 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9825 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
9826 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9827 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9828 ; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9829 ; AVX2-FAST-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
9830 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9831 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm13[0,0,0,0,4,4,4,4]
9832 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm15[0,1,0,1,4,5,4,5]
9833 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9834 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9835 ; AVX2-FAST-NEXT: vbroadcastsd 16(%rax), %ymm2
9836 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9837 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9838 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9839 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9840 ; AVX2-FAST-NEXT: vbroadcastss %xmm3, %xmm0
9841 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9842 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm1
9843 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9844 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9845 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9846 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
9847 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9848 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
9849 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
9850 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9851 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
9852 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9853 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
9854 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
9855 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
9856 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9857 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm5[3,3],xmm6[3,3]
9858 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm3[2],xmm7[3],xmm3[3]
9859 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9860 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9861 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9862 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm8[2,2,2,2]
9863 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
9864 ; AVX2-FAST-NEXT: vbroadcastsd 40(%rax), %ymm2
9865 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
9866 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
9867 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9868 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9869 ; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9870 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9871 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
9872 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9873 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9874 ; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9875 ; AVX2-FAST-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
9876 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9877 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9878 ; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9879 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9880 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9881 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9882 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9883 ; AVX2-FAST-NEXT: vbroadcastsd 48(%rax), %ymm2
9884 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9885 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9886 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9887 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9888 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm0
9889 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9890 ; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm1
9891 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9892 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9893 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9894 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
9895 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9896 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
9897 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9898 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
9899 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
9900 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9901 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
9902 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
9903 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
9904 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9905 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm5[3,3],xmm3[3,3]
9906 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
9907 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9908 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9909 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9910 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[2,2,2,2]
9911 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3]
9912 ; AVX2-FAST-NEXT: vbroadcastsd 72(%rax), %ymm2
9913 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
9914 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
9915 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9916 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9917 ; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9918 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9919 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
9920 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9921 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9922 ; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9923 ; AVX2-FAST-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
9924 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9925 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9926 ; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9927 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9928 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9929 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9930 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9931 ; AVX2-FAST-NEXT: vbroadcastsd 80(%rax), %ymm2
9932 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9933 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9934 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9935 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9936 ; AVX2-FAST-NEXT: vbroadcastss %xmm8, %xmm0
9937 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9938 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm1
9939 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9940 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9941 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9942 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
9943 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9944 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
9945 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
9946 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9947 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
9948 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9949 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
9950 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
9951 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
9952 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9953 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm6[3,3],xmm5[3,3]
9954 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
9955 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9956 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9957 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9958 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[2,2,2,2]
9959 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
9960 ; AVX2-FAST-NEXT: vbroadcastsd 104(%rax), %ymm2
9961 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
9962 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
9963 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9964 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
9965 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm13[1,1,1,1,5,5,5,5]
9966 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
9967 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm10[1],ymm0[2,3,4],ymm10[5],ymm0[6,7]
9968 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9969 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9970 ; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9971 ; AVX2-FAST-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
9972 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9973 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9974 ; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9975 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9976 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9977 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9978 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9979 ; AVX2-FAST-NEXT: vbroadcastsd 112(%rax), %ymm2
9980 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9981 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9982 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9983 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9984 ; AVX2-FAST-NEXT: vbroadcastss %xmm8, %xmm0
9985 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9986 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm1
9987 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9988 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9989 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9990 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
9991 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
9992 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
9993 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
9994 ; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm3 # 16-byte Reload
9995 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
9996 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9997 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
9998 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
9999 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
10000 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10001 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm6[3,3],xmm5[3,3]
10002 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
10003 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
10004 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
10005 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
10006 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[2,2,2,2]
10007 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
10008 ; AVX2-FAST-NEXT: vbroadcastsd 136(%rax), %ymm2
10009 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
10010 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
10011 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10012 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
10013 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,1,1,1,5,5,5,5]
10014 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
10015 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm14[1],ymm0[2,3,4],ymm14[5],ymm0[6,7]
10016 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
10017 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10018 ; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
10019 ; AVX2-FAST-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
10020 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
10021 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
10022 ; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
10023 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
10024 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
10025 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
10026 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10027 ; AVX2-FAST-NEXT: vbroadcastsd 144(%rax), %ymm2
10028 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
10029 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
10030 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10031 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
10032 ; AVX2-FAST-NEXT: vbroadcastss %xmm8, %xmm0
10033 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
10034 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm1
10035 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
10036 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
10037 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
10038 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
10039 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
10040 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
10041 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
10042 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
10043 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
10044 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10045 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
10046 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
10047 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
10048 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10049 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm6[3,3],xmm5[3,3]
10050 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
10051 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
10052 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
10053 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
10054 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[2,2,2,2]
10055 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
10056 ; AVX2-FAST-NEXT: vbroadcastsd 168(%rax), %ymm2
10057 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
10058 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
10059 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10060 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
10061 ; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
10062 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
10063 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
10064 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
10065 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10066 ; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
10067 ; AVX2-FAST-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
10068 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
10069 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
10070 ; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
10071 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
10072 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
10073 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
10074 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10075 ; AVX2-FAST-NEXT: vbroadcastsd 176(%rax), %ymm2
10076 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
10077 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
10078 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10079 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
10080 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm0
10081 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
10082 ; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm1
10083 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
10084 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
10085 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
10086 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
10087 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
10088 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
10089 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
10090 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
10091 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
10092 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10093 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
10094 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
10095 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm1[4,5,6],ymm8[7]
10096 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10097 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm5[3,3]
10098 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
10099 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
10100 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
10101 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
10102 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm3[2,2,2,2]
10103 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
10104 ; AVX2-FAST-NEXT: vbroadcastsd 200(%rax), %ymm2
10105 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
10106 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
10107 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10108 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
10109 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10110 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm12[1,1],ymm0[5,5],ymm12[5,5]
10111 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
10112 ; AVX2-FAST-NEXT: # ymm1 = mem[1,1,1,1,5,5,5,5]
10113 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
10114 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4],ymm11[5],ymm1[6,7]
10115 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
10116 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
10117 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
10118 ; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
10119 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
10120 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
10121 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
10122 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10123 ; AVX2-FAST-NEXT: vbroadcastsd 208(%rax), %ymm2
10124 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
10125 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
10126 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10127 ; AVX2-FAST-NEXT: vbroadcastss 16(%rdx), %ymm0
10128 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
10129 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm4[3,1,2,0,7,5,6,4]
10130 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
10131 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10132 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
10133 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
10134 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
10135 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
10136 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
10137 ; AVX2-FAST-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
10138 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
10139 ; AVX2-FAST-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
10140 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
10141 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10142 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
10143 ; AVX2-FAST-NEXT: # ymm0 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
10144 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm5 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
10145 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
10146 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[3,3,3,3]
10147 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3,4,5,6,7]
10148 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10149 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
10150 ; AVX2-FAST-NEXT: # ymm5 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
10151 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm6 = mem[2,3,2,3,6,7,6,7]
10152 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3,4],ymm5[5,6],ymm6[7]
10153 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,2,3]
10154 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0],ymm0[1,2,3,4],ymm5[5,6,7]
10155 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10156 ; AVX2-FAST-NEXT: vbroadcastss 48(%rdx), %ymm0
10157 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
10158 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm4[3,1,2,0,7,5,6,4]
10159 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6],ymm6[7]
10160 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10161 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10162 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
10163 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
10164 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
10165 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm6 # 16-byte Folded Reload
10166 ; AVX2-FAST-NEXT: # xmm6 = xmm3[3,3],mem[3,3]
10167 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
10168 ; AVX2-FAST-NEXT: # xmm6 = xmm6[0,1,2],mem[3]
10169 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0],ymm6[1,2,3],ymm0[4,5,6,7]
10170 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
10171 ; AVX2-FAST-NEXT: # ymm0 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
10172 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm6 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
10173 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
10174 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[3,3,3,3]
10175 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7]
10176 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10177 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
10178 ; AVX2-FAST-NEXT: # ymm6 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
10179 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm7 = mem[2,3,2,3,6,7,6,7]
10180 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1,2],ymm7[3,4],ymm6[5,6],ymm7[7]
10181 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3]
10182 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm6[0],ymm0[1,2,3,4],ymm6[5,6,7]
10183 ; AVX2-FAST-NEXT: vbroadcastss 80(%rdx), %ymm0
10184 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
10185 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm4[3,1,2,0,7,5,6,4]
10186 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6],ymm6[7]
10187 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10188 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10189 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
10190 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
10191 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
10192 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm6 # 16-byte Folded Reload
10193 ; AVX2-FAST-NEXT: # xmm6 = xmm3[3,3],mem[3,3]
10194 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
10195 ; AVX2-FAST-NEXT: # xmm6 = xmm6[0,1,2],mem[3]
10196 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm6[1,2,3],ymm0[4,5,6,7]
10197 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
10198 ; AVX2-FAST-NEXT: # ymm0 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
10199 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm6 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
10200 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
10201 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[3,3,3,3]
10202 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7]
10203 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10204 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
10205 ; AVX2-FAST-NEXT: # ymm6 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
10206 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm9 = mem[2,3,2,3,6,7,6,7]
10207 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0],ymm6[1,2],ymm9[3,4],ymm6[5,6],ymm9[7]
10208 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3]
10209 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm0[1,2,3,4],ymm6[5,6,7]
10210 ; AVX2-FAST-NEXT: vbroadcastss 112(%rdx), %ymm1
10211 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10212 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm0[3,1,2,0,7,5,6,4]
10213 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm1[6],ymm9[7]
10214 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm9 = ymm10[0],ymm13[0],ymm10[1],ymm13[1],ymm10[4],ymm13[4],ymm10[5],ymm13[5]
10215 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5],ymm2[6,7]
10216 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
10217 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm9 # 16-byte Folded Reload
10218 ; AVX2-FAST-NEXT: # xmm9 = xmm8[3,3],mem[3,3]
10219 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
10220 ; AVX2-FAST-NEXT: # xmm9 = xmm9[0,1,2],mem[3]
10221 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm9[1,2,3],ymm2[4,5,6,7]
10222 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
10223 ; AVX2-FAST-NEXT: # ymm9 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
10224 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm10 = ymm13[2],ymm10[2],ymm13[3],ymm10[3],ymm13[6],ymm10[6],ymm13[7],ymm10[7]
10225 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[3,3,3,3]
10226 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[3,3,3,3]
10227 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7]
10228 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10229 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
10230 ; AVX2-FAST-NEXT: # ymm10 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
10231 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm13 = mem[2,3,2,3,6,7,6,7]
10232 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0],ymm10[1,2],ymm13[3,4],ymm10[5,6],ymm13[7]
10233 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,1,2,3]
10234 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1,2,3,4],ymm10[5,6,7]
10235 ; AVX2-FAST-NEXT: vbroadcastss 144(%rdx), %ymm10
10236 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10237 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm13 = ymm1[3,1,2,0,7,5,6,4]
10238 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0,1,2,3,4,5],ymm10[6],ymm13[7]
10239 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm13 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[4],ymm15[4],ymm14[5],ymm15[5]
10240 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm13[4,5],ymm10[6,7]
10241 ; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
10242 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm13 # 16-byte Folded Reload
10243 ; AVX2-FAST-NEXT: # xmm13 = xmm8[3,3],mem[3,3]
10244 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
10245 ; AVX2-FAST-NEXT: # xmm13 = xmm13[0,1,2],mem[3]
10246 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm13[1,2,3],ymm10[4,5,6,7]
10247 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm13 # 32-byte Folded Reload
10248 ; AVX2-FAST-NEXT: # ymm13 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
10249 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm14 = ymm15[2],ymm14[2],ymm15[3],ymm14[3],ymm15[6],ymm14[6],ymm15[7],ymm14[7]
10250 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[3,3,3,3]
10251 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[3,3,3,3]
10252 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5,6,7]
10253 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10254 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
10255 ; AVX2-FAST-NEXT: # ymm14 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
10256 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm15 = mem[2,3,2,3,6,7,6,7]
10257 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1,2],ymm15[3,4],ymm14[5,6],ymm15[7]
10258 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[2,1,2,3]
10259 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1,2,3,4],ymm14[5,6,7]
10260 ; AVX2-FAST-NEXT: vbroadcastss 176(%rdx), %ymm14
10261 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10262 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm1[3,1,2,0,7,5,6,4]
10263 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6],ymm15[7]
10264 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10265 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
10266 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm15 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[4],ymm0[4],ymm4[5],ymm0[5]
10267 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
10268 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
10269 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm15 # 16-byte Folded Reload
10270 ; AVX2-FAST-NEXT: # xmm15 = xmm8[3,3],mem[3,3]
10271 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
10272 ; AVX2-FAST-NEXT: # xmm15 = xmm15[0,1,2],mem[3]
10273 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1,2,3],ymm14[4,5,6,7]
10274 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
10275 ; AVX2-FAST-NEXT: # ymm15 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
10276 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm4[2],ymm0[3],ymm4[3],ymm0[6],ymm4[6],ymm0[7],ymm4[7]
10277 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[3,3,3,3]
10278 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
10279 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
10280 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10281 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
10282 ; AVX2-FAST-NEXT: # ymm15 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
10283 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,2,3,6,7,6,7]
10284 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm15[1,2],ymm8[3,4],ymm15[5,6],ymm8[7]
10285 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,2,3]
10286 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0],ymm0[1,2,3,4],ymm8[5,6,7]
10287 ; AVX2-FAST-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm8 # 32-byte Folded Reload
10288 ; AVX2-FAST-NEXT: # ymm8 = ymm11[0],mem[0],ymm11[1],mem[1],ymm11[4],mem[4],ymm11[5],mem[5]
10289 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm12[3,1,2,0,7,5,6,4]
10290 ; AVX2-FAST-NEXT: vbroadcastss 208(%rdx), %ymm4
10291 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm15[0,1,2,3,4,5],ymm4[6],ymm15[7]
10292 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5],ymm4[6,7]
10293 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
10294 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm8 # 16-byte Folded Reload
10295 ; AVX2-FAST-NEXT: # xmm8 = xmm1[3,3],mem[3,3]
10296 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
10297 ; AVX2-FAST-NEXT: # xmm8 = xmm8[0,1,2],mem[3]
10298 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm8[1,2,3],ymm4[4,5,6,7]
10299 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
10300 ; AVX2-FAST-NEXT: vmovaps %ymm4, 1440(%rax)
10301 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1312(%rax)
10302 ; AVX2-FAST-NEXT: vmovaps %ymm14, 1216(%rax)
10303 ; AVX2-FAST-NEXT: vmovaps %ymm13, 1088(%rax)
10304 ; AVX2-FAST-NEXT: vmovaps %ymm10, 992(%rax)
10305 ; AVX2-FAST-NEXT: vmovaps %ymm9, 864(%rax)
10306 ; AVX2-FAST-NEXT: vmovaps %ymm2, 768(%rax)
10307 ; AVX2-FAST-NEXT: vmovaps %ymm6, 640(%rax)
10308 ; AVX2-FAST-NEXT: vmovaps %ymm3, 544(%rax)
10309 ; AVX2-FAST-NEXT: vmovaps %ymm7, 416(%rax)
10310 ; AVX2-FAST-NEXT: vmovaps %ymm5, 320(%rax)
10311 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10312 ; AVX2-FAST-NEXT: vmovaps %ymm0, 192(%rax)
10313 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10314 ; AVX2-FAST-NEXT: vmovaps %ymm0, 96(%rax)
10315 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10316 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1504(%rax)
10317 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10318 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1472(%rax)
10319 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10320 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1408(%rax)
10321 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10322 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1344(%rax)
10323 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10324 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1280(%rax)
10325 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10326 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1248(%rax)
10327 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10328 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1184(%rax)
10329 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10330 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1120(%rax)
10331 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10332 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1056(%rax)
10333 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10334 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1024(%rax)
10335 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10336 ; AVX2-FAST-NEXT: vmovaps %ymm0, 960(%rax)
10337 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10338 ; AVX2-FAST-NEXT: vmovaps %ymm0, 896(%rax)
10339 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10340 ; AVX2-FAST-NEXT: vmovaps %ymm0, 832(%rax)
10341 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10342 ; AVX2-FAST-NEXT: vmovaps %ymm0, 800(%rax)
10343 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10344 ; AVX2-FAST-NEXT: vmovaps %ymm0, 736(%rax)
10345 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10346 ; AVX2-FAST-NEXT: vmovaps %ymm0, 672(%rax)
10347 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10348 ; AVX2-FAST-NEXT: vmovaps %ymm0, 608(%rax)
10349 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10350 ; AVX2-FAST-NEXT: vmovaps %ymm0, 576(%rax)
10351 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10352 ; AVX2-FAST-NEXT: vmovaps %ymm0, 512(%rax)
10353 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10354 ; AVX2-FAST-NEXT: vmovaps %ymm0, 448(%rax)
10355 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10356 ; AVX2-FAST-NEXT: vmovaps %ymm0, 384(%rax)
10357 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10358 ; AVX2-FAST-NEXT: vmovaps %ymm0, 352(%rax)
10359 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10360 ; AVX2-FAST-NEXT: vmovaps %ymm0, 288(%rax)
10361 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10362 ; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rax)
10363 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10364 ; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rax)
10365 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10366 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rax)
10367 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10368 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
10369 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10370 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
10371 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10372 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1760(%rax)
10373 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10374 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1728(%rax)
10375 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10376 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1664(%rax)
10377 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10378 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1536(%rax)
10379 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10380 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1376(%rax)
10381 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10382 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1152(%rax)
10383 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10384 ; AVX2-FAST-NEXT: vmovaps %ymm0, 928(%rax)
10385 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10386 ; AVX2-FAST-NEXT: vmovaps %ymm0, 704(%rax)
10387 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10388 ; AVX2-FAST-NEXT: vmovaps %ymm0, 480(%rax)
10389 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10390 ; AVX2-FAST-NEXT: vmovaps %ymm0, 256(%rax)
10391 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10392 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
10393 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10394 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1696(%rax)
10395 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10396 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1632(%rax)
10397 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10398 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1568(%rax)
10399 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10400 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1600(%rax)
10401 ; AVX2-FAST-NEXT: addq $3080, %rsp # imm = 0xC08
10402 ; AVX2-FAST-NEXT: vzeroupper
10403 ; AVX2-FAST-NEXT: retq
10405 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf64:
10406 ; AVX2-FAST-PERLANE: # %bb.0:
10407 ; AVX2-FAST-PERLANE-NEXT: subq $2968, %rsp # imm = 0xB98
10408 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
10409 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rax), %xmm0
10410 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10411 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
10412 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm13
10413 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %xmm4
10414 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10415 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm1
10416 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10417 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %xmm5
10418 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10419 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
10420 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm13[1],xmm1[2,3]
10421 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
10422 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
10423 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm10
10424 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %xmm3
10425 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10426 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm9
10427 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm9[1],xmm10[1],zero
10428 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm7
10429 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %xmm8
10430 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10431 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %xmm6
10432 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %xmm11
10433 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10434 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm6[1,1,2,2]
10435 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm7[2],xmm2[3]
10436 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
10437 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
10438 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
10439 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10440 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rax), %xmm0
10441 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10442 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
10443 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1,1,1]
10444 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
10445 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
10446 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
10447 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm11[1,1,2,2]
10448 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm8[2],xmm1[3]
10449 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
10450 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %xmm8
10451 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm8[1],xmm3[1],zero
10452 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
10453 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
10454 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10455 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r8), %xmm1
10456 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10457 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r9), %xmm0
10458 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10459 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
10460 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
10461 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
10462 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rax), %xmm1
10463 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10464 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10465 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
10466 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %xmm2
10467 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10468 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %xmm1
10469 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10470 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
10471 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
10472 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
10473 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rcx), %xmm3
10474 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10475 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %xmm2
10476 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10477 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
10478 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
10479 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
10480 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10481 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r8), %xmm1
10482 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10483 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r9), %xmm0
10484 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10485 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
10486 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
10487 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
10488 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rax), %xmm1
10489 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10490 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10491 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
10492 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %xmm2
10493 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10494 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %xmm1
10495 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10496 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
10497 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
10498 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
10499 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rcx), %xmm3
10500 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10501 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %xmm2
10502 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10503 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
10504 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
10505 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
10506 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10507 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%r8), %xmm1
10508 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10509 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%r9), %xmm0
10510 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
10511 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
10512 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
10513 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
10514 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rax), %xmm1
10515 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10516 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10517 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
10518 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %xmm2
10519 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10520 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rsi), %xmm1
10521 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10522 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
10523 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
10524 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
10525 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rcx), %xmm3
10526 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10527 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdx), %xmm2
10528 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10529 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
10530 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
10531 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
10532 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10533 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%r8), %xmm1
10534 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10535 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%r9), %xmm0
10536 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10537 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
10538 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
10539 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
10540 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rax), %xmm1
10541 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10542 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10543 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
10544 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %xmm2
10545 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10546 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rsi), %xmm1
10547 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10548 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
10549 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
10550 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
10551 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rcx), %xmm3
10552 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10553 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdx), %xmm2
10554 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10555 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
10556 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
10557 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
10558 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10559 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%r9), %xmm0
10560 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10561 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%r8), %xmm1
10562 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10563 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
10564 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
10565 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
10566 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rax), %xmm1
10567 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10568 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
10569 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
10570 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %xmm2
10571 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10572 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rsi), %xmm1
10573 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10574 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
10575 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
10576 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
10577 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rcx), %xmm3
10578 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10579 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdx), %xmm2
10580 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10581 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
10582 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
10583 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
10584 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10585 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
10586 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10587 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm1
10588 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10589 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
10590 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
10591 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm2
10592 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10593 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %ymm1
10594 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10595 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
10596 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
10597 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm2
10598 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10599 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %ymm1
10600 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10601 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
10602 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
10603 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10604 ; AVX2-FAST-PERLANE-NEXT: vmovaps 16(%rax), %xmm2
10605 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
10606 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
10607 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10608 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm0
10609 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10610 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm1
10611 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10612 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
10613 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
10614 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm1
10615 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10616 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %ymm2
10617 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10618 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
10619 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
10620 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %ymm2
10621 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10622 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %ymm1
10623 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10624 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
10625 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
10626 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10627 ; AVX2-FAST-PERLANE-NEXT: vmovaps 48(%rax), %xmm2
10628 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
10629 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
10630 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10631 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm1
10632 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10633 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %ymm0
10634 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10635 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
10636 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
10637 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %ymm1
10638 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10639 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rcx), %ymm2
10640 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10641 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
10642 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
10643 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r8), %ymm2
10644 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10645 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r9), %ymm1
10646 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10647 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
10648 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
10649 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10650 ; AVX2-FAST-PERLANE-NEXT: vmovaps 80(%rax), %xmm2
10651 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
10652 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
10653 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10654 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm1
10655 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10656 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %ymm0
10657 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10658 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
10659 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
10660 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %ymm1
10661 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10662 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rcx), %ymm2
10663 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10664 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
10665 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
10666 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r8), %ymm2
10667 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10668 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r9), %ymm1
10669 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10670 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
10671 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
10672 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10673 ; AVX2-FAST-PERLANE-NEXT: vmovaps 112(%rax), %xmm2
10674 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
10675 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
10676 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10677 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm1
10678 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10679 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rsi), %ymm0
10680 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10681 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
10682 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
10683 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdx), %ymm1
10684 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10685 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rcx), %ymm2
10686 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10687 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
10688 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
10689 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%r8), %ymm2
10690 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10691 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%r9), %ymm1
10692 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10693 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
10694 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
10695 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10696 ; AVX2-FAST-PERLANE-NEXT: vmovaps 144(%rax), %xmm2
10697 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
10698 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
10699 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10700 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm1
10701 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10702 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rsi), %ymm0
10703 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10704 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
10705 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
10706 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdx), %ymm1
10707 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10708 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rcx), %ymm2
10709 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10710 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
10711 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
10712 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%r8), %ymm2
10713 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10714 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%r9), %ymm1
10715 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10716 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
10717 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
10718 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10719 ; AVX2-FAST-PERLANE-NEXT: vmovaps 176(%rax), %xmm2
10720 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
10721 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
10722 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10723 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %ymm1
10724 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10725 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rsi), %ymm0
10726 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10727 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
10728 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
10729 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdx), %ymm1
10730 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10731 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rcx), %ymm2
10732 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10733 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
10734 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
10735 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%r8), %ymm2
10736 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10737 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%r9), %ymm1
10738 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10739 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
10740 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
10741 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
10742 ; AVX2-FAST-PERLANE-NEXT: vmovaps 208(%rax), %xmm2
10743 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
10744 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
10745 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10746 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rcx), %xmm0
10747 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm0, %xmm2
10748 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdx), %xmm1
10749 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm1, %xmm3
10750 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
10751 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rsi), %xmm4
10752 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %xmm5
10753 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
10754 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,2,2]
10755 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,2,1]
10756 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
10757 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 224(%r8), %ymm3
10758 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
10759 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%r9), %xmm3
10760 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm3, %ymm15
10761 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm15[5],ymm2[6,7]
10762 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 224(%rax), %ymm15
10763 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm15[6],ymm2[7]
10764 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10765 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm4[1,1,2,2]
10766 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3]
10767 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
10768 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm15 = zero,xmm1[1],xmm0[1],zero
10769 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm15[1,2],ymm2[3,4,5,6,7]
10770 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 228(%r8), %ymm14
10771 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm14[3],ymm2[4,5,6,7]
10772 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm14 = xmm3[1,1,1,1]
10773 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
10774 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5],ymm2[6,7]
10775 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, 224(%rax), %ymm15, %ymm14
10776 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm14[1],ymm2[2,3,4],ymm14[5],ymm2[6,7]
10777 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10778 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm5[3,3],xmm4[3,3]
10779 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
10780 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1
10781 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,2,2]
10782 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
10783 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
10784 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%r8), %ymm5
10785 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5,6,7]
10786 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm3[2,2,2,2]
10787 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
10788 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 232(%rax), %ymm1
10789 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7]
10790 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10791 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %ymm11
10792 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rsi), %ymm4
10793 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm4[1,1,1,1,5,5,5,5]
10794 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm11[1],ymm0[2,3,4],ymm11[5],ymm0[6,7]
10795 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm0[2,2,2,2]
10796 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdx), %ymm12
10797 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rcx), %ymm2
10798 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm12[1,1],ymm2[1,1],ymm12[5,5],ymm2[5,5]
10799 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5,6],ymm14[7]
10800 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 240(%r8), %ymm15
10801 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1,2,3,4,5,6],ymm15[7]
10802 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 240(%r9), %xmm15
10803 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6,7]
10804 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 240(%rax), %ymm15
10805 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm15[2],ymm14[3,4,5,6,7]
10806 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10807 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm10, %xmm14
10808 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm15
10809 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
10810 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm15 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
10811 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,2,2]
10812 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
10813 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
10814 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
10815 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm13, %xmm1
10816 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
10817 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm15 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
10818 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
10819 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 16-byte Folded Reload
10820 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm15[0],ymm13[0],ymm15[2],ymm13[2]
10821 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6],ymm14[7]
10822 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10823 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm7[3,3],xmm6[3,3]
10824 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm7 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
10825 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10826 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10827 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10828 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
10829 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm0[2,2,2,2]
10830 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm7 = xmm1[0,1,2],xmm7[3]
10831 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 8(%rax), %ymm9
10832 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
10833 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
10834 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10835 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
10836 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm1, %xmm6
10837 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm8, %xmm7
10838 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
10839 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10840 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
10841 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
10842 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10843 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10844 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
10845 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
10846 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
10847 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
10848 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
10849 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 16-byte Folded Reload
10850 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
10851 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
10852 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10853 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm14[3,3],xmm15[3,3]
10854 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm7 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
10855 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10856 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10857 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10858 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
10859 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
10860 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm7 = xmm10[0,1,2],xmm7[3]
10861 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 40(%rax), %ymm8
10862 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
10863 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
10864 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10865 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
10866 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm15, %xmm6
10867 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
10868 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm10, %xmm7
10869 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
10870 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
10871 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10872 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
10873 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10874 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10875 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
10876 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10877 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
10878 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
10879 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
10880 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
10881 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
10882 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
10883 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10884 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm9[3,3],xmm1[3,3]
10885 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm7 = xmm10[2],xmm15[2],xmm10[3],xmm15[3]
10886 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10887 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10888 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10889 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
10890 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
10891 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm7 = xmm14[0,1,2],xmm7[3]
10892 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 72(%rax), %ymm8
10893 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
10894 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
10895 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10896 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
10897 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm15, %xmm6
10898 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10899 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm14, %xmm7
10900 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
10901 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10902 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
10903 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
10904 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10905 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10906 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
10907 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
10908 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
10909 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
10910 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
10911 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
10912 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
10913 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
10914 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10915 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm10[3,3],xmm9[3,3]
10916 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm7 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
10917 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10918 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10919 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10920 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
10921 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
10922 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm7 = xmm0[0,1,2],xmm7[3]
10923 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 104(%rax), %ymm8
10924 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
10925 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
10926 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10927 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
10928 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm15, %xmm6
10929 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10930 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm14, %xmm7
10931 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
10932 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10933 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
10934 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
10935 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10936 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10937 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
10938 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsp), %xmm13 # 16-byte Reload
10939 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
10940 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
10941 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
10942 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
10943 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
10944 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
10945 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10946 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm10[3,3],xmm9[3,3]
10947 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm7 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
10948 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10949 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10950 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10951 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
10952 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
10953 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm7 = xmm0[0,1,2],xmm7[3]
10954 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 136(%rax), %ymm8
10955 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
10956 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
10957 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10958 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10959 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm14, %xmm6
10960 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
10961 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm10, %xmm7
10962 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
10963 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
10964 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10965 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
10966 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10967 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10968 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
10969 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
10970 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
10971 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
10972 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
10973 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
10974 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
10975 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
10976 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10977 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm9[3,3],xmm1[3,3]
10978 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm7 = xmm10[2],xmm14[2],xmm10[3],xmm14[3]
10979 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10980 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10981 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10982 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
10983 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm13[2,2,2,2]
10984 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm7 = xmm0[0,1,2],xmm7[3]
10985 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 168(%rax), %ymm8
10986 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
10987 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
10988 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10989 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10990 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm14, %xmm6
10991 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
10992 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm7
10993 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
10994 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10995 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
10996 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
10997 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
10998 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
10999 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
11000 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
11001 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11002 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm7 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
11003 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
11004 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
11005 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
11006 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6],ymm6[7]
11007 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11008 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm10[3,3],xmm9[3,3]
11009 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm7 = xmm13[2],xmm14[2],xmm13[3],xmm14[3]
11010 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
11011 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm7[0,1,2,2]
11012 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,2,1]
11013 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6],ymm7[7]
11014 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm7 = xmm0[2,2,2,2]
11015 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm7 = xmm1[0,1,2],xmm7[3]
11016 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 200(%rax), %ymm8
11017 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
11018 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm7[2,3,4],ymm6[5,6,7]
11019 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11020 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
11021 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm6 # 32-byte Folded Reload
11022 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
11023 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[3,3,3,3]
11024 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
11025 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
11026 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm7 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
11027 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
11028 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7]
11029 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 220(%r8), %ymm7
11030 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
11031 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 220(%r9), %ymm7
11032 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
11033 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 216(%rax), %ymm7
11034 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0],ymm6[1,2,3,4,5,6],ymm7[7]
11035 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11036 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 240(%rdx), %ymm6
11037 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm2[3,1,2,0,7,5,6,4]
11038 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6],ymm7[7]
11039 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm7 = ymm11[0],ymm4[0],ymm11[1],ymm4[1],ymm11[4],ymm4[4],ymm11[5],ymm4[5]
11040 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
11041 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 236(%r8), %ymm7
11042 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5,6,7]
11043 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,2,3,3]
11044 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3],ymm6[4,5,6,7]
11045 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm6 = ymm11[2],ymm4[2],ymm11[3],ymm4[3],ymm11[6],ymm4[6],ymm11[7],ymm4[7]
11046 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,2,2,2]
11047 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm7 = ymm12[2],ymm2[2],ymm12[3],ymm2[3],ymm12[6],ymm2[6],ymm12[7],ymm2[7]
11048 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
11049 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
11050 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm6 = mem[1,2,2,3,5,6,6,7]
11051 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,2,2,2]
11052 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2,3,4,5,6],ymm6[7]
11053 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rax), %ymm6
11054 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm6[3],ymm3[4,5,6,7]
11055 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11056 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm6[2,3],ymm7[2,3]
11057 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0],ymm3[1],ymm5[2,3,4],ymm3[5],ymm5[6,7]
11058 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11059 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm12[2],ymm2[3],ymm12[3],ymm2[6],ymm12[6],ymm2[7],ymm12[7]
11060 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm11[2],ymm4[3],ymm11[3],ymm4[6],ymm11[6],ymm4[7],ymm11[7]
11061 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11062 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11063 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11064 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 252(%r8), %ymm1
11065 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
11066 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 252(%r9), %ymm1
11067 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
11068 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 248(%rax), %ymm1
11069 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
11070 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11071 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
11072 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1,1,1,5,5,5,5]
11073 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11074 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4],ymm5[5],ymm0[6,7]
11075 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11076 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
11077 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
11078 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,1],ymm6[1,1],ymm12[5,5],ymm6[5,5]
11079 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11080 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
11081 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm14[0,0,0,0,4,4,4,4]
11082 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
11083 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm13[0,1,0,1,4,5,4,5]
11084 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11085 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11086 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 16(%rax), %ymm2
11087 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11088 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11089 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11090 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
11091 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,1,1,1,5,5,5,5]
11092 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11093 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2,3,4],ymm4[5],ymm0[6,7]
11094 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11095 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
11096 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11097 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm11[1,1],ymm1[5,5],ymm11[5,5]
11098 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11099 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11100 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11101 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11102 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11103 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11104 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11105 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 48(%rax), %ymm2
11106 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11107 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11108 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11109 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
11110 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
11111 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11112 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
11113 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11114 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11115 ; AVX2-FAST-PERLANE-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11116 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
11117 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11118 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11119 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11120 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11121 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11122 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11123 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11124 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 80(%rax), %ymm2
11125 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11126 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11127 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11128 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
11129 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
11130 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11131 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
11132 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11133 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11134 ; AVX2-FAST-PERLANE-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11135 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
11136 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11137 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11138 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11139 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11140 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11141 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11142 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11143 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 112(%rax), %ymm2
11144 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11145 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11146 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11147 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
11148 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
11149 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11150 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
11151 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11152 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11153 ; AVX2-FAST-PERLANE-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11154 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
11155 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11156 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11157 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11158 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11159 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11160 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11161 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11162 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 144(%rax), %ymm2
11163 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11164 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11165 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11166 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
11167 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
11168 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11169 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
11170 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11171 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11172 ; AVX2-FAST-PERLANE-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11173 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
11174 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11175 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11176 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11177 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11178 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11179 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11180 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11181 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 176(%rax), %ymm2
11182 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11183 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11184 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11185 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11186 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm10[1,1],ymm0[5,5],ymm10[5,5]
11187 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,1,1,1,5,5,5,5]
11188 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
11189 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
11190 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
11191 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11192 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11193 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11194 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11195 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11196 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11197 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 208(%rax), %ymm2
11198 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11199 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11200 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%rdx), %ymm0
11201 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm6[3,1,2,0,7,5,6,4]
11202 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
11203 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm1 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
11204 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
11205 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11206 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11207 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
11208 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11209 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
11210 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
11211 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm6[2],ymm12[2],ymm6[3],ymm12[3],ymm6[6],ymm12[6],ymm6[7],ymm12[7]
11212 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[6],ymm5[6],ymm7[7],ymm5[7]
11213 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11214 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11215 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11216 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm13[3,3],ymm14[3,3],ymm13[7,7],ymm14[7,7]
11217 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
11218 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
11219 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11220 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
11221 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 48(%rdx), %ymm0
11222 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, %ymm6
11223 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm11[3,1,2,0,7,5,6,4]
11224 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
11225 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm1 = ymm4[0],ymm15[0],ymm4[1],ymm15[1],ymm4[4],ymm15[4],ymm4[5],ymm15[5]
11226 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
11227 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11228 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11229 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
11230 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11231 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
11232 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
11233 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
11234 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm6[2],mem[2],ymm6[3],mem[3],ymm6[6],mem[6],ymm6[7],mem[7]
11235 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm15[2],ymm4[2],ymm15[3],ymm4[3],ymm15[6],ymm4[6],ymm15[7],ymm4[7]
11236 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11237 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11238 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11239 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11240 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11241 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11242 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
11243 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
11244 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11245 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
11246 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11247 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 80(%rdx), %ymm0
11248 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
11249 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm6[3,1,2,0,7,5,6,4]
11250 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
11251 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
11252 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11253 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm1 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[4],ymm2[4],ymm4[5],ymm2[5]
11254 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
11255 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11256 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11257 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
11258 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11259 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
11260 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
11261 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11262 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
11263 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm6[2],mem[2],ymm6[3],mem[3],ymm6[6],mem[6],ymm6[7],mem[7]
11264 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
11265 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11266 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11267 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11268 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11269 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11270 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11271 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
11272 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
11273 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11274 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
11275 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 112(%rdx), %ymm0
11276 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11277 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm4[3,1,2,0,7,5,6,4]
11278 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
11279 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
11280 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
11281 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
11282 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11283 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11284 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
11285 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11286 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
11287 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
11288 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
11289 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
11290 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
11291 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11292 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11293 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11294 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11295 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11296 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11297 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
11298 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
11299 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11300 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
11301 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 144(%rdx), %ymm0
11302 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11303 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm3[3,1,2,0,7,5,6,4]
11304 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
11305 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
11306 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
11307 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm1 = ymm15[0],ymm2[0],ymm15[1],ymm2[1],ymm15[4],ymm2[4],ymm15[5],ymm2[5]
11308 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
11309 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11310 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, (%rsp), %xmm1, %xmm1 # 16-byte Folded Reload
11311 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
11312 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11313 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
11314 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
11315 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm1 # 32-byte Folded Reload
11316 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm3[2],mem[2],ymm3[3],mem[3],ymm3[6],mem[6],ymm3[7],mem[7]
11317 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm15[2],ymm2[3],ymm15[3],ymm2[6],ymm15[6],ymm2[7],ymm15[7]
11318 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11319 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11320 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
11321 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11322 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11323 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11324 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm15 = mem[2,3,2,3,6,7,6,7]
11325 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0],ymm1[1,2],ymm15[3,4],ymm1[5,6],ymm15[7]
11326 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11327 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
11328 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 176(%rdx), %ymm0
11329 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11330 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm1[3,1,2,0,7,5,6,4]
11331 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1,2,3,4,5],ymm0[6],ymm15[7]
11332 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11333 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
11334 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm15 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
11335 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5],ymm2[6,7]
11336 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
11337 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm15 # 16-byte Folded Reload
11338 ; AVX2-FAST-PERLANE-NEXT: # xmm15 = xmm7[3,3],mem[3,3]
11339 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
11340 ; AVX2-FAST-PERLANE-NEXT: # xmm15 = xmm15[0,1,2],mem[3]
11341 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm15[1,2,3],ymm2[4,5,6,7]
11342 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
11343 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
11344 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm14[2],ymm0[3],ymm14[3],ymm0[6],ymm14[6],ymm0[7],ymm14[7]
11345 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[3,3,3,3]
11346 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11347 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
11348 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11349 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
11350 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11351 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm14 = mem[2,3,2,3,6,7,6,7]
11352 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1,2],ymm14[3,4],ymm15[5,6],ymm14[7]
11353 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[2,1,2,3]
11354 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0],ymm0[1,2,3,4],ymm14[5,6,7]
11355 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11356 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
11357 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
11358 ; AVX2-FAST-PERLANE-NEXT: vpermilps $39, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
11359 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[3,1,2,0,7,5,6,4]
11360 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 208(%rdx), %ymm13
11361 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5],ymm13[6],ymm15[7]
11362 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5],ymm13[6,7]
11363 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11364 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm14 # 16-byte Folded Reload
11365 ; AVX2-FAST-PERLANE-NEXT: # xmm14 = xmm1[3,3],mem[3,3]
11366 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload
11367 ; AVX2-FAST-PERLANE-NEXT: # xmm14 = xmm14[0,1,2],mem[3]
11368 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1,2,3],ymm13[4,5,6,7]
11369 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
11370 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm13, 1440(%rax)
11371 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1312(%rax)
11372 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 1216(%rax)
11373 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 1088(%rax)
11374 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 992(%rax)
11375 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 864(%rax)
11376 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 768(%rax)
11377 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 640(%rax)
11378 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11379 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 544(%rax)
11380 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11381 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 416(%rax)
11382 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, 320(%rax)
11383 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm12, 192(%rax)
11384 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm10, 96(%rax)
11385 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11386 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1504(%rax)
11387 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 1472(%rax)
11388 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11389 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1280(%rax)
11390 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11391 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1248(%rax)
11392 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11393 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1056(%rax)
11394 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11395 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1024(%rax)
11396 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11397 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 832(%rax)
11398 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11399 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 800(%rax)
11400 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11401 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 608(%rax)
11402 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11403 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 576(%rax)
11404 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11405 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 384(%rax)
11406 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11407 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 352(%rax)
11408 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11409 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rax)
11410 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11411 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%rax)
11412 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11413 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1760(%rax)
11414 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11415 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1728(%rax)
11416 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11417 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1664(%rax)
11418 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11419 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1536(%rax)
11420 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11421 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1408(%rax)
11422 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11423 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1376(%rax)
11424 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11425 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1344(%rax)
11426 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11427 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1184(%rax)
11428 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11429 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1152(%rax)
11430 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11431 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1120(%rax)
11432 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11433 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 960(%rax)
11434 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11435 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 928(%rax)
11436 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11437 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 896(%rax)
11438 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11439 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 736(%rax)
11440 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11441 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 704(%rax)
11442 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11443 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 672(%rax)
11444 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11445 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 512(%rax)
11446 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11447 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 480(%rax)
11448 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11449 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 448(%rax)
11450 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11451 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 288(%rax)
11452 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11453 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rax)
11454 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11455 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax)
11456 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11457 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rax)
11458 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11459 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
11460 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11461 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
11462 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11463 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1696(%rax)
11464 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11465 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1632(%rax)
11466 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11467 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1600(%rax)
11468 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11469 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1568(%rax)
11470 ; AVX2-FAST-PERLANE-NEXT: addq $2968, %rsp # imm = 0xB98
11471 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
11472 ; AVX2-FAST-PERLANE-NEXT: retq
11473 ;
11474 ; AVX512F-LABEL: store_i32_stride7_vf64:
11475 ; AVX512F: # %bb.0:
11476 ; AVX512F-NEXT: subq $3080, %rsp # imm = 0xC08
11477 ; AVX512F-NEXT: vmovdqa64 (%rdx), %zmm3
11478 ; AVX512F-NEXT: vmovdqa64 (%rcx), %zmm2
11479 ; AVX512F-NEXT: vmovdqa64 (%r8), %zmm0
11480 ; AVX512F-NEXT: vmovdqa64 64(%r8), %zmm25
11481 ; AVX512F-NEXT: vmovdqa64 128(%r8), %zmm22
11482 ; AVX512F-NEXT: vmovdqa64 (%r9), %zmm6
11483 ; AVX512F-NEXT: vmovdqa64 64(%r9), %zmm5
11484 ; AVX512F-NEXT: vmovdqa64 128(%r9), %zmm4
11485 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
11486 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm7
11487 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm1, %zmm7
11488 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11489 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm8
11490 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
11491 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm7
11492 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm1, %zmm7
11493 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11494 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm10
11495 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
11496 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm1
11497 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm7, %zmm1
11498 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11499 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm9
11500 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
11501 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm7
11502 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm1, %zmm7
11503 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11504 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm11
11505 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
11506 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm7
11507 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm1, %zmm7
11508 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11509 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm7
11510 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
11511 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm12
11512 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm1, %zmm12
11513 ; AVX512F-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11514 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm12
11515 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
11516 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm13
11517 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm1, %zmm13
11518 ; AVX512F-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11519 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm13
11520 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
11521 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm14
11522 ; AVX512F-NEXT: vpermt2d %zmm6, %zmm1, %zmm14
11523 ; AVX512F-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11524 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm14
11525 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
11526 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm6
11527 ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11528 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm0
11529 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm10, %zmm0
11530 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11531 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm0
11532 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm11, %zmm0
11533 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11534 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm0
11535 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm7, %zmm0
11536 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11537 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm0
11538 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm12, %zmm0
11539 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11540 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm0
11541 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm13, %zmm0
11542 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11543 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm0
11544 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm14, %zmm0
11545 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11546 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm1, %zmm5
11547 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11548 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm0
11549 ; AVX512F-NEXT: vpermt2d %zmm4, %zmm10, %zmm0
11550 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11551 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm0
11552 ; AVX512F-NEXT: vpermt2d %zmm4, %zmm11, %zmm0
11553 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11554 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm0
11555 ; AVX512F-NEXT: vpermt2d %zmm4, %zmm7, %zmm0
11556 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11557 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm22, %zmm12
11558 ; AVX512F-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11559 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm22, %zmm13
11560 ; AVX512F-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11561 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm22, %zmm14
11562 ; AVX512F-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11563 ; AVX512F-NEXT: vpermt2d %zmm22, %zmm1, %zmm4
11564 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11565 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
11566 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm1
11567 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
11568 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11569 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
11570 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm1
11571 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm11, %zmm1
11572 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11573 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm30 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
11574 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm1
11575 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm30, %zmm1
11576 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11577 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
11578 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm1
11579 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm4, %zmm1
11580 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11581 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
11582 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm1, %zmm3
11583 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11584 ; AVX512F-NEXT: vmovdqa64 64(%rdx), %zmm24
11585 ; AVX512F-NEXT: vmovdqa64 64(%rcx), %zmm2
11586 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm3
11587 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm8, %zmm3
11588 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11589 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm3
11590 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm9, %zmm3
11591 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11592 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm3
11593 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm0, %zmm3
11594 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11595 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm3
11596 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm11, %zmm3
11597 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11598 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm3
11599 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm30, %zmm3
11600 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11601 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3
11602 ; AVX512F-NEXT: vpermt2d %zmm24, %zmm4, %zmm3
11603 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11604 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm1, %zmm24
11605 ; AVX512F-NEXT: vmovdqa64 192(%rdx), %zmm29
11606 ; AVX512F-NEXT: vmovdqa64 192(%rcx), %zmm2
11607 ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm28
11608 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm11, %zmm28
11609 ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm3
11610 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm30, %zmm3
11611 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11612 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3
11613 ; AVX512F-NEXT: vpermt2d %zmm29, %zmm4, %zmm3
11614 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11615 ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm3
11616 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm1, %zmm3
11617 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11618 ; AVX512F-NEXT: vmovdqa64 128(%rdx), %zmm21
11619 ; AVX512F-NEXT: vmovdqa64 128(%rcx), %zmm3
11620 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm21, %zmm11
11621 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm21, %zmm30
11622 ; AVX512F-NEXT: vpermi2d %zmm21, %zmm3, %zmm4
11623 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11624 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm6
11625 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm5
11626 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm4
11627 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm1, %zmm21
11628 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm8, %zmm6
11629 ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11630 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm9, %zmm5
11631 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11632 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm0, %zmm4
11633 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11634 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm29, %zmm8
11635 ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11636 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm29, %zmm9
11637 ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11638 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm0, %zmm29
11639 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm22
11640 ; AVX512F-NEXT: vmovdqa64 (%rsi), %zmm0
11641 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
11642 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm1
11643 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm9, %zmm1
11644 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11645 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm20 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
11646 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm1
11647 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
11648 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11649 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm27 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
11650 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm1
11651 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm27, %zmm1
11652 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11653 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
11654 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm1
11655 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm7, %zmm1
11656 ; AVX512F-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
11657 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
11658 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3
11659 ; AVX512F-NEXT: vpermt2d %zmm22, %zmm6, %zmm3
11660 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
11661 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm18
11662 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm18
11663 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
11664 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm8, %zmm22
11665 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm5
11666 ; AVX512F-NEXT: vmovdqa64 64(%rsi), %zmm0
11667 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm31
11668 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm9, %zmm31
11669 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm2
11670 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm20, %zmm2
11671 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11672 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm2
11673 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm27, %zmm2
11674 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11675 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm26
11676 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm7, %zmm26
11677 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm25
11678 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm6, %zmm25
11679 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm10
11680 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm10
11681 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm8, %zmm5
11682 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm12
11683 ; AVX512F-NEXT: vmovdqa64 192(%rsi), %zmm13
11684 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm16
11685 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm7, %zmm16
11686 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm14
11687 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm6, %zmm14
11688 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm2
11689 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm1, %zmm2
11690 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm15
11691 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm8, %zmm15
11692 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm17
11693 ; AVX512F-NEXT: vmovdqa64 128(%rsi), %zmm0
11694 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm17, %zmm7
11695 ; AVX512F-NEXT: vpermi2d %zmm17, %zmm0, %zmm6
11696 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm17, %zmm1
11697 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm19
11698 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm4
11699 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm23
11700 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm8, %zmm17
11701 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm9, %zmm19
11702 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm20, %zmm4
11703 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11704 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm27, %zmm23
11705 ; AVX512F-NEXT: vpermi2d %zmm13, %zmm12, %zmm9
11706 ; AVX512F-NEXT: vpermi2d %zmm13, %zmm12, %zmm20
11707 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm27, %zmm12
11708 ; AVX512F-NEXT: movw $3096, %ax # imm = 0xC18
11709 ; AVX512F-NEXT: kmovw %eax, %k1
11710 ; AVX512F-NEXT: vmovdqa32 %zmm28, %zmm16 {%k1}
11711 ; AVX512F-NEXT: movw $-31994, %ax # imm = 0x8306
11712 ; AVX512F-NEXT: kmovw %eax, %k2
11713 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11714 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
11715 ; AVX512F-NEXT: vmovdqu64 (%rsp), %zmm13 # 64-byte Reload
11716 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11717 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
11718 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11719 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm26 {%k1}
11720 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm7 {%k1}
11721 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
11722 ; AVX512F-NEXT: vmovdqa64 (%rax), %zmm28
11723 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
11724 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
11725 ; AVX512F-NEXT: vpermt2d %zmm28, %zmm0, %zmm8
11726 ; AVX512F-NEXT: movw $28897, %cx # imm = 0x70E1
11727 ; AVX512F-NEXT: kmovw %ecx, %k1
11728 ; AVX512F-NEXT: vmovdqa32 %zmm8, %zmm13 {%k1}
11729 ; AVX512F-NEXT: vmovdqu64 %zmm13, (%rsp) # 64-byte Spill
11730 ; AVX512F-NEXT: vmovdqa64 64(%rax), %zmm8
11731 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
11732 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm0, %zmm11
11733 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm26 {%k1}
11734 ; AVX512F-NEXT: vmovdqa64 128(%rax), %zmm27
11735 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
11736 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm0, %zmm11
11737 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm7 {%k1}
11738 ; AVX512F-NEXT: movw $6192, %cx # imm = 0x1830
11739 ; AVX512F-NEXT: kmovw %ecx, %k1
11740 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11741 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm0 {%k1}
11742 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11743 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11744 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
11745 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11746 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
11747 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11748 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm25 {%k2}
11749 ; AVX512F-NEXT: vmovdqa32 %zmm30, %zmm6 {%k2}
11750 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
11751 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
11752 ; AVX512F-NEXT: vpermt2d %zmm28, %zmm0, %zmm2
11753 ; AVX512F-NEXT: movw $7224, %cx # imm = 0x1C38
11754 ; AVX512F-NEXT: kmovw %ecx, %k2
11755 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
11756 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11757 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
11758 ; AVX512F-NEXT: vmovdqa32 %zmm18, %zmm11 {%k1}
11759 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
11760 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm0, %zmm2
11761 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm25 {%k2}
11762 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
11763 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm0, %zmm2
11764 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm6 {%k2}
11765 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
11766 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
11767 ; AVX512F-NEXT: vpermt2d %zmm28, %zmm0, %zmm2
11768 ; AVX512F-NEXT: movw $-30962, %cx # imm = 0x870E
11769 ; AVX512F-NEXT: kmovw %ecx, %k2
11770 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm11 {%k2}
11771 ; AVX512F-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11772 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
11773 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm11 {%k1}
11774 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
11775 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm0, %zmm2
11776 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm11 {%k2}
11777 ; AVX512F-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11778 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
11779 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm30 {%k1}
11780 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11781 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm0, %zmm1
11782 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm30 {%k2}
11783 ; AVX512F-NEXT: movw $1548, %cx # imm = 0x60C
11784 ; AVX512F-NEXT: kmovw %ecx, %k2
11785 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11786 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
11787 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm2 {%k2}
11788 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11789 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
11790 ; AVX512F-NEXT: vmovdqa32 %zmm24, %zmm5 {%k1}
11791 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm17 {%k1}
11792 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,29,u,u,u,u,6,7,30,u,u,u,u,13,14,31>
11793 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11794 ; AVX512F-NEXT: vpermt2d %zmm28, %zmm0, %zmm1
11795 ; AVX512F-NEXT: movw $-7741, %cx # imm = 0xE1C3
11796 ; AVX512F-NEXT: kmovw %ecx, %k1
11797 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm22 {%k1}
11798 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11799 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm0, %zmm1
11800 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm5 {%k1}
11801 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11802 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm0, %zmm1
11803 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm17 {%k1}
11804 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
11805 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11806 ; AVX512F-NEXT: vpermt2d %zmm28, %zmm0, %zmm1
11807 ; AVX512F-NEXT: movw $14448, %cx # imm = 0x3870
11808 ; AVX512F-NEXT: kmovw %ecx, %k3
11809 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm2 {%k3}
11810 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11811 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11812 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm31 {%k2}
11813 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11814 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm19 {%k2}
11815 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11816 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm9 {%k2}
11817 ; AVX512F-NEXT: movw $12384, %cx # imm = 0x3060
11818 ; AVX512F-NEXT: kmovw %ecx, %k1
11819 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11820 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11821 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm3 {%k1}
11822 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11823 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm0, %zmm1
11824 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm31 {%k3}
11825 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11826 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm0, %zmm1
11827 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm19 {%k3}
11828 ; AVX512F-NEXT: vmovdqa64 192(%r8), %zmm1
11829 ; AVX512F-NEXT: vmovdqa64 192(%r9), %zmm2
11830 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
11831 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
11832 ; AVX512F-NEXT: vmovdqa64 192(%rax), %zmm10
11833 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm0, %zmm11
11834 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm9 {%k3}
11835 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
11836 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
11837 ; AVX512F-NEXT: vpermt2d %zmm28, %zmm0, %zmm11
11838 ; AVX512F-NEXT: movw $3612, %ax # imm = 0xE1C
11839 ; AVX512F-NEXT: kmovw %eax, %k2
11840 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm3 {%k2}
11841 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11842 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
11843 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
11844 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm4 {%k1}
11845 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
11846 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm0, %zmm11
11847 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm4 {%k2}
11848 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
11849 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11850 ; AVX512F-NEXT: vmovdqa32 %zmm3, %zmm24 {%k1}
11851 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
11852 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm0, %zmm11
11853 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm24 {%k2}
11854 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
11855 ; AVX512F-NEXT: vmovdqa32 %zmm20, %zmm18 {%k1}
11856 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
11857 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm13
11858 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm0, %zmm13
11859 ; AVX512F-NEXT: vmovdqa32 %zmm13, %zmm18 {%k2}
11860 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,23,u,u,8,9,10,11,24,u,u,15>
11861 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm16, %zmm3
11862 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <0,1,2,25,u,u,6,7,8,9,26,u,u,13,14,15>
11863 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm14, %zmm11
11864 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = <0,27,u,u,4,5,6,7,28,u,u,11,12,13,14,29>
11865 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
11866 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm14, %zmm13
11867 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm14 = <u,u,2,3,4,5,30,u,u,9,10,11,12,31,u,u>
11868 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm15, %zmm14
11869 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
11870 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11871 ; AVX512F-NEXT: vmovdqa32 %zmm15, %zmm0 {%k1}
11872 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
11873 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
11874 ; AVX512F-NEXT: vpermt2d %zmm28, %zmm15, %zmm16
11875 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
11876 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
11877 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm28 {%k1}
11878 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
11879 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm23 {%k1}
11880 ; AVX512F-NEXT: vmovdqa32 %zmm29, %zmm12 {%k1}
11881 ; AVX512F-NEXT: movw $15480, %ax # imm = 0x3C78
11882 ; AVX512F-NEXT: kmovw %eax, %k1
11883 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
11884 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
11885 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm15, %zmm21
11886 ; AVX512F-NEXT: vmovdqa32 %zmm28, %zmm21 {%k1}
11887 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
11888 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm15, %zmm8
11889 ; AVX512F-NEXT: vmovdqa32 %zmm23, %zmm8 {%k1}
11890 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm20
11891 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
11892 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm8, %zmm1
11893 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm15, %zmm1
11894 ; AVX512F-NEXT: vmovdqa32 %zmm12, %zmm1 {%k1}
11895 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,1,2,3,4,5,23,u,8,9,10,11,12,24,u,15>
11896 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm3, %zmm8
11897 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,1,2,3,25,u,6,7,8,9,10,26,u,13,14,15>
11898 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm11, %zmm0
11899 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <0,1,27,u,4,5,6,7,8,28,u,11,12,13,14,15>
11900 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm13, %zmm11
11901 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm12 = <29,u,2,3,4,5,6,30,u,9,10,11,12,13,31,u>
11902 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm14, %zmm12
11903 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [22,1,2,3,4,5,6,23,8,9,10,11,12,13,24,15]
11904 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm8, %zmm2
11905 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,25,6,7,8,9,10,11,26,13,14,15]
11906 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm0, %zmm8
11907 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,27,4,5,6,7,8,9,28,11,12,13,14,15]
11908 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm11, %zmm0
11909 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
11910 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm12, %zmm11
11911 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
11912 ; AVX512F-NEXT: vmovdqa64 %zmm1, 1472(%rax)
11913 ; AVX512F-NEXT: vmovdqa64 %zmm18, 1408(%rax)
11914 ; AVX512F-NEXT: vmovdqa64 %zmm9, 1344(%rax)
11915 ; AVX512F-NEXT: vmovdqa64 %zmm17, 1280(%rax)
11916 ; AVX512F-NEXT: vmovdqa64 %zmm30, 1216(%rax)
11917 ; AVX512F-NEXT: vmovdqa64 %zmm6, 1152(%rax)
11918 ; AVX512F-NEXT: vmovdqa64 %zmm7, 1088(%rax)
11919 ; AVX512F-NEXT: vmovdqa64 %zmm20, 1024(%rax)
11920 ; AVX512F-NEXT: vmovdqa64 %zmm24, 960(%rax)
11921 ; AVX512F-NEXT: vmovdqa64 %zmm19, 896(%rax)
11922 ; AVX512F-NEXT: vmovdqa64 %zmm5, 832(%rax)
11923 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11924 ; AVX512F-NEXT: vmovaps %zmm1, 768(%rax)
11925 ; AVX512F-NEXT: vmovdqa64 %zmm25, 704(%rax)
11926 ; AVX512F-NEXT: vmovdqa64 %zmm26, 640(%rax)
11927 ; AVX512F-NEXT: vmovdqa64 %zmm21, 576(%rax)
11928 ; AVX512F-NEXT: vmovdqa64 %zmm4, 512(%rax)
11929 ; AVX512F-NEXT: vmovdqa64 %zmm31, 448(%rax)
11930 ; AVX512F-NEXT: vmovdqa64 %zmm22, 384(%rax)
11931 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11932 ; AVX512F-NEXT: vmovaps %zmm1, 320(%rax)
11933 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11934 ; AVX512F-NEXT: vmovaps %zmm1, 256(%rax)
11935 ; AVX512F-NEXT: vmovups (%rsp), %zmm1 # 64-byte Reload
11936 ; AVX512F-NEXT: vmovaps %zmm1, 192(%rax)
11937 ; AVX512F-NEXT: vmovdqa64 %zmm16, 128(%rax)
11938 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11939 ; AVX512F-NEXT: vmovaps %zmm1, 64(%rax)
11940 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11941 ; AVX512F-NEXT: vmovaps %zmm1, (%rax)
11942 ; AVX512F-NEXT: vmovdqa64 %zmm11, 1728(%rax)
11943 ; AVX512F-NEXT: vmovdqa64 %zmm0, 1664(%rax)
11944 ; AVX512F-NEXT: vmovdqa64 %zmm8, 1600(%rax)
11945 ; AVX512F-NEXT: vmovdqa64 %zmm2, 1536(%rax)
11946 ; AVX512F-NEXT: addq $3080, %rsp # imm = 0xC08
11947 ; AVX512F-NEXT: vzeroupper
11948 ; AVX512F-NEXT: retq
11949 ;
11950 ; AVX512BW-LABEL: store_i32_stride7_vf64:
11951 ; AVX512BW: # %bb.0:
11952 ; AVX512BW-NEXT: subq $3080, %rsp # imm = 0xC08
11953 ; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm3
11954 ; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm2
11955 ; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm0
11956 ; AVX512BW-NEXT: vmovdqa64 64(%r8), %zmm25
11957 ; AVX512BW-NEXT: vmovdqa64 128(%r8), %zmm22
11958 ; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm6
11959 ; AVX512BW-NEXT: vmovdqa64 64(%r9), %zmm5
11960 ; AVX512BW-NEXT: vmovdqa64 128(%r9), %zmm4
11961 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
11962 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm7
11963 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm1, %zmm7
11964 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11965 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm8
11966 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
11967 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm7
11968 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm1, %zmm7
11969 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11970 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm10
11971 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
11972 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm1
11973 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm7, %zmm1
11974 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11975 ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm9
11976 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
11977 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm7
11978 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm1, %zmm7
11979 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11980 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm11
11981 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
11982 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm7
11983 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm1, %zmm7
11984 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11985 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm7
11986 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
11987 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm12
11988 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm1, %zmm12
11989 ; AVX512BW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11990 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm12
11991 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
11992 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm13
11993 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm1, %zmm13
11994 ; AVX512BW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11995 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm13
11996 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
11997 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm14
11998 ; AVX512BW-NEXT: vpermt2d %zmm6, %zmm1, %zmm14
11999 ; AVX512BW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12000 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm14
12001 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
12002 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm1, %zmm6
12003 ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12004 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm0
12005 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm10, %zmm0
12006 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12007 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm0
12008 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm11, %zmm0
12009 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12010 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm0
12011 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm7, %zmm0
12012 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12013 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm0
12014 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm12, %zmm0
12015 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12016 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm0
12017 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm13, %zmm0
12018 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12019 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm0
12020 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm14, %zmm0
12021 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12022 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm1, %zmm5
12023 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12024 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm0
12025 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm10, %zmm0
12026 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12027 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm0
12028 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm11, %zmm0
12029 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12030 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm0
12031 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm7, %zmm0
12032 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12033 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm22, %zmm12
12034 ; AVX512BW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12035 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm22, %zmm13
12036 ; AVX512BW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12037 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm22, %zmm14
12038 ; AVX512BW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12039 ; AVX512BW-NEXT: vpermt2d %zmm22, %zmm1, %zmm4
12040 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12041 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
12042 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm1
12043 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
12044 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12045 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
12046 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm1
12047 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm11, %zmm1
12048 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12049 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm30 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
12050 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm1
12051 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm30, %zmm1
12052 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12053 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
12054 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm1
12055 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm4, %zmm1
12056 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12057 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
12058 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm1, %zmm3
12059 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12060 ; AVX512BW-NEXT: vmovdqa64 64(%rdx), %zmm24
12061 ; AVX512BW-NEXT: vmovdqa64 64(%rcx), %zmm2
12062 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm3
12063 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm8, %zmm3
12064 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12065 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm3
12066 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm9, %zmm3
12067 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12068 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm3
12069 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm3
12070 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12071 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm3
12072 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm11, %zmm3
12073 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12074 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm3
12075 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm30, %zmm3
12076 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12077 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3
12078 ; AVX512BW-NEXT: vpermt2d %zmm24, %zmm4, %zmm3
12079 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12080 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm1, %zmm24
12081 ; AVX512BW-NEXT: vmovdqa64 192(%rdx), %zmm29
12082 ; AVX512BW-NEXT: vmovdqa64 192(%rcx), %zmm2
12083 ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm28
12084 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm11, %zmm28
12085 ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm3
12086 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm30, %zmm3
12087 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12088 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3
12089 ; AVX512BW-NEXT: vpermt2d %zmm29, %zmm4, %zmm3
12090 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12091 ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm3
12092 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm1, %zmm3
12093 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12094 ; AVX512BW-NEXT: vmovdqa64 128(%rdx), %zmm21
12095 ; AVX512BW-NEXT: vmovdqa64 128(%rcx), %zmm3
12096 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm21, %zmm11
12097 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm21, %zmm30
12098 ; AVX512BW-NEXT: vpermi2d %zmm21, %zmm3, %zmm4
12099 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12100 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm6
12101 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm5
12102 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm4
12103 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm1, %zmm21
12104 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm8, %zmm6
12105 ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12106 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm9, %zmm5
12107 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12108 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm0, %zmm4
12109 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12110 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm29, %zmm8
12111 ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12112 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm29, %zmm9
12113 ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12114 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm0, %zmm29
12115 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm22
12116 ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
12117 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
12118 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm1
12119 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm9, %zmm1
12120 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12121 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm20 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
12122 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm1
12123 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm20, %zmm1
12124 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12125 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm27 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
12126 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm1
12127 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm27, %zmm1
12128 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12129 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
12130 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm1
12131 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm7, %zmm1
12132 ; AVX512BW-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
12133 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
12134 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3
12135 ; AVX512BW-NEXT: vpermt2d %zmm22, %zmm6, %zmm3
12136 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
12137 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm18
12138 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm1, %zmm18
12139 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
12140 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm8, %zmm22
12141 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm5
12142 ; AVX512BW-NEXT: vmovdqa64 64(%rsi), %zmm0
12143 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm31
12144 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm9, %zmm31
12145 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm2
12146 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm20, %zmm2
12147 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12148 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm2
12149 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm27, %zmm2
12150 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12151 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm26
12152 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm7, %zmm26
12153 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm25
12154 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm6, %zmm25
12155 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm10
12156 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm1, %zmm10
12157 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm8, %zmm5
12158 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm12
12159 ; AVX512BW-NEXT: vmovdqa64 192(%rsi), %zmm13
12160 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm16
12161 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm7, %zmm16
12162 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm14
12163 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm6, %zmm14
12164 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm2
12165 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm1, %zmm2
12166 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm15
12167 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm8, %zmm15
12168 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm17
12169 ; AVX512BW-NEXT: vmovdqa64 128(%rsi), %zmm0
12170 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm17, %zmm7
12171 ; AVX512BW-NEXT: vpermi2d %zmm17, %zmm0, %zmm6
12172 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm17, %zmm1
12173 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm19
12174 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm4
12175 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm23
12176 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm8, %zmm17
12177 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm9, %zmm19
12178 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm20, %zmm4
12179 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12180 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm27, %zmm23
12181 ; AVX512BW-NEXT: vpermi2d %zmm13, %zmm12, %zmm9
12182 ; AVX512BW-NEXT: vpermi2d %zmm13, %zmm12, %zmm20
12183 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm27, %zmm12
12184 ; AVX512BW-NEXT: movw $3096, %ax # imm = 0xC18
12185 ; AVX512BW-NEXT: kmovd %eax, %k1
12186 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm16 {%k1}
12187 ; AVX512BW-NEXT: movw $-31994, %ax # imm = 0x8306
12188 ; AVX512BW-NEXT: kmovd %eax, %k2
12189 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12190 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
12191 ; AVX512BW-NEXT: vmovdqu64 (%rsp), %zmm13 # 64-byte Reload
12192 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12193 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm13 {%k1}
12194 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12195 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm26 {%k1}
12196 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm7 {%k1}
12197 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
12198 ; AVX512BW-NEXT: vmovdqa64 (%rax), %zmm28
12199 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
12200 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
12201 ; AVX512BW-NEXT: vpermt2d %zmm28, %zmm0, %zmm8
12202 ; AVX512BW-NEXT: movw $28897, %cx # imm = 0x70E1
12203 ; AVX512BW-NEXT: kmovd %ecx, %k1
12204 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm13 {%k1}
12205 ; AVX512BW-NEXT: vmovdqu64 %zmm13, (%rsp) # 64-byte Spill
12206 ; AVX512BW-NEXT: vmovdqa64 64(%rax), %zmm8
12207 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
12208 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm0, %zmm11
12209 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm26 {%k1}
12210 ; AVX512BW-NEXT: vmovdqa64 128(%rax), %zmm27
12211 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
12212 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm0, %zmm11
12213 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm7 {%k1}
12214 ; AVX512BW-NEXT: movw $6192, %cx # imm = 0x1830
12215 ; AVX512BW-NEXT: kmovd %ecx, %k1
12216 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12217 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm0 {%k1}
12218 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12219 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12220 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm15 {%k1}
12221 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12222 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm3 {%k2}
12223 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12224 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm25 {%k2}
12225 ; AVX512BW-NEXT: vmovdqa32 %zmm30, %zmm6 {%k2}
12226 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
12227 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
12228 ; AVX512BW-NEXT: vpermt2d %zmm28, %zmm0, %zmm2
12229 ; AVX512BW-NEXT: movw $7224, %cx # imm = 0x1C38
12230 ; AVX512BW-NEXT: kmovd %ecx, %k2
12231 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
12232 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12233 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
12234 ; AVX512BW-NEXT: vmovdqa32 %zmm18, %zmm11 {%k1}
12235 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
12236 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm0, %zmm2
12237 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm25 {%k2}
12238 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
12239 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm0, %zmm2
12240 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm6 {%k2}
12241 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
12242 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
12243 ; AVX512BW-NEXT: vpermt2d %zmm28, %zmm0, %zmm2
12244 ; AVX512BW-NEXT: movw $-30962, %cx # imm = 0x870E
12245 ; AVX512BW-NEXT: kmovd %ecx, %k2
12246 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm11 {%k2}
12247 ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12248 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
12249 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm11 {%k1}
12250 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
12251 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm0, %zmm2
12252 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm11 {%k2}
12253 ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12254 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
12255 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm30 {%k1}
12256 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12257 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm0, %zmm1
12258 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm30 {%k2}
12259 ; AVX512BW-NEXT: movw $1548, %cx # imm = 0x60C
12260 ; AVX512BW-NEXT: kmovd %ecx, %k2
12261 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12262 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
12263 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm2 {%k2}
12264 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12265 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
12266 ; AVX512BW-NEXT: vmovdqa32 %zmm24, %zmm5 {%k1}
12267 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm17 {%k1}
12268 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,29,u,u,u,u,6,7,30,u,u,u,u,13,14,31>
12269 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12270 ; AVX512BW-NEXT: vpermt2d %zmm28, %zmm0, %zmm1
12271 ; AVX512BW-NEXT: movw $-7741, %cx # imm = 0xE1C3
12272 ; AVX512BW-NEXT: kmovd %ecx, %k1
12273 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm22 {%k1}
12274 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12275 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm0, %zmm1
12276 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm5 {%k1}
12277 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12278 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm0, %zmm1
12279 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm17 {%k1}
12280 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
12281 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12282 ; AVX512BW-NEXT: vpermt2d %zmm28, %zmm0, %zmm1
12283 ; AVX512BW-NEXT: movw $14448, %cx # imm = 0x3870
12284 ; AVX512BW-NEXT: kmovd %ecx, %k3
12285 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm2 {%k3}
12286 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12287 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12288 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm31 {%k2}
12289 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12290 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm19 {%k2}
12291 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12292 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm9 {%k2}
12293 ; AVX512BW-NEXT: movw $12384, %cx # imm = 0x3060
12294 ; AVX512BW-NEXT: kmovd %ecx, %k1
12295 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12296 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12297 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm3 {%k1}
12298 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12299 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm0, %zmm1
12300 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm31 {%k3}
12301 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12302 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm0, %zmm1
12303 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm19 {%k3}
12304 ; AVX512BW-NEXT: vmovdqa64 192(%r8), %zmm1
12305 ; AVX512BW-NEXT: vmovdqa64 192(%r9), %zmm2
12306 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
12307 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
12308 ; AVX512BW-NEXT: vmovdqa64 192(%rax), %zmm10
12309 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm0, %zmm11
12310 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm9 {%k3}
12311 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
12312 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
12313 ; AVX512BW-NEXT: vpermt2d %zmm28, %zmm0, %zmm11
12314 ; AVX512BW-NEXT: movw $3612, %ax # imm = 0xE1C
12315 ; AVX512BW-NEXT: kmovd %eax, %k2
12316 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm3 {%k2}
12317 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12318 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
12319 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
12320 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm4 {%k1}
12321 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
12322 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm0, %zmm11
12323 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm4 {%k2}
12324 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
12325 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12326 ; AVX512BW-NEXT: vmovdqa32 %zmm3, %zmm24 {%k1}
12327 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
12328 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm0, %zmm11
12329 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm24 {%k2}
12330 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
12331 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm18 {%k1}
12332 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
12333 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm13
12334 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm0, %zmm13
12335 ; AVX512BW-NEXT: vmovdqa32 %zmm13, %zmm18 {%k2}
12336 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,23,u,u,8,9,10,11,24,u,u,15>
12337 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm16, %zmm3
12338 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <0,1,2,25,u,u,6,7,8,9,26,u,u,13,14,15>
12339 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm14, %zmm11
12340 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = <0,27,u,u,4,5,6,7,28,u,u,11,12,13,14,29>
12341 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
12342 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm14, %zmm13
12343 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm14 = <u,u,2,3,4,5,30,u,u,9,10,11,12,31,u,u>
12344 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm15, %zmm14
12345 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
12346 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12347 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm0 {%k1}
12348 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
12349 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
12350 ; AVX512BW-NEXT: vpermt2d %zmm28, %zmm15, %zmm16
12351 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
12352 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
12353 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm28 {%k1}
12354 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
12355 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm23 {%k1}
12356 ; AVX512BW-NEXT: vmovdqa32 %zmm29, %zmm12 {%k1}
12357 ; AVX512BW-NEXT: movw $15480, %ax # imm = 0x3C78
12358 ; AVX512BW-NEXT: kmovd %eax, %k1
12359 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm16 {%k1}
12360 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
12361 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm15, %zmm21
12362 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm21 {%k1}
12363 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
12364 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm15, %zmm8
12365 ; AVX512BW-NEXT: vmovdqa32 %zmm23, %zmm8 {%k1}
12366 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm20
12367 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
12368 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm8, %zmm1
12369 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm15, %zmm1
12370 ; AVX512BW-NEXT: vmovdqa32 %zmm12, %zmm1 {%k1}
12371 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,1,2,3,4,5,23,u,8,9,10,11,12,24,u,15>
12372 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm3, %zmm8
12373 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,1,2,3,25,u,6,7,8,9,10,26,u,13,14,15>
12374 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm11, %zmm0
12375 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <0,1,27,u,4,5,6,7,8,28,u,11,12,13,14,15>
12376 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm13, %zmm11
12377 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm12 = <29,u,2,3,4,5,6,30,u,9,10,11,12,13,31,u>
12378 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm14, %zmm12
12379 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [22,1,2,3,4,5,6,23,8,9,10,11,12,13,24,15]
12380 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm8, %zmm2
12381 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,25,6,7,8,9,10,11,26,13,14,15]
12382 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm0, %zmm8
12383 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,27,4,5,6,7,8,9,28,11,12,13,14,15]
12384 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm11, %zmm0
12385 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
12386 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm12, %zmm11
12387 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
12388 ; AVX512BW-NEXT: vmovdqa64 %zmm1, 1472(%rax)
12389 ; AVX512BW-NEXT: vmovdqa64 %zmm18, 1408(%rax)
12390 ; AVX512BW-NEXT: vmovdqa64 %zmm9, 1344(%rax)
12391 ; AVX512BW-NEXT: vmovdqa64 %zmm17, 1280(%rax)
12392 ; AVX512BW-NEXT: vmovdqa64 %zmm30, 1216(%rax)
12393 ; AVX512BW-NEXT: vmovdqa64 %zmm6, 1152(%rax)
12394 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 1088(%rax)
12395 ; AVX512BW-NEXT: vmovdqa64 %zmm20, 1024(%rax)
12396 ; AVX512BW-NEXT: vmovdqa64 %zmm24, 960(%rax)
12397 ; AVX512BW-NEXT: vmovdqa64 %zmm19, 896(%rax)
12398 ; AVX512BW-NEXT: vmovdqa64 %zmm5, 832(%rax)
12399 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12400 ; AVX512BW-NEXT: vmovaps %zmm1, 768(%rax)
12401 ; AVX512BW-NEXT: vmovdqa64 %zmm25, 704(%rax)
12402 ; AVX512BW-NEXT: vmovdqa64 %zmm26, 640(%rax)
12403 ; AVX512BW-NEXT: vmovdqa64 %zmm21, 576(%rax)
12404 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 512(%rax)
12405 ; AVX512BW-NEXT: vmovdqa64 %zmm31, 448(%rax)
12406 ; AVX512BW-NEXT: vmovdqa64 %zmm22, 384(%rax)
12407 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12408 ; AVX512BW-NEXT: vmovaps %zmm1, 320(%rax)
12409 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12410 ; AVX512BW-NEXT: vmovaps %zmm1, 256(%rax)
12411 ; AVX512BW-NEXT: vmovups (%rsp), %zmm1 # 64-byte Reload
12412 ; AVX512BW-NEXT: vmovaps %zmm1, 192(%rax)
12413 ; AVX512BW-NEXT: vmovdqa64 %zmm16, 128(%rax)
12414 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12415 ; AVX512BW-NEXT: vmovaps %zmm1, 64(%rax)
12416 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12417 ; AVX512BW-NEXT: vmovaps %zmm1, (%rax)
12418 ; AVX512BW-NEXT: vmovdqa64 %zmm11, 1728(%rax)
12419 ; AVX512BW-NEXT: vmovdqa64 %zmm0, 1664(%rax)
12420 ; AVX512BW-NEXT: vmovdqa64 %zmm8, 1600(%rax)
12421 ; AVX512BW-NEXT: vmovdqa64 %zmm2, 1536(%rax)
12422 ; AVX512BW-NEXT: addq $3080, %rsp # imm = 0xC08
12423 ; AVX512BW-NEXT: vzeroupper
12424 ; AVX512BW-NEXT: retq
12425 %in.vec0 = load <64 x i32>, ptr %in.vecptr0, align 64
12426 %in.vec1 = load <64 x i32>, ptr %in.vecptr1, align 64
12427 %in.vec2 = load <64 x i32>, ptr %in.vecptr2, align 64
12428 %in.vec3 = load <64 x i32>, ptr %in.vecptr3, align 64
12429 %in.vec4 = load <64 x i32>, ptr %in.vecptr4, align 64
12430 %in.vec5 = load <64 x i32>, ptr %in.vecptr5, align 64
12431 %in.vec6 = load <64 x i32>, ptr %in.vecptr6, align 64
12432 %1 = shufflevector <64 x i32> %in.vec0, <64 x i32> %in.vec1, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
12433 %2 = shufflevector <64 x i32> %in.vec2, <64 x i32> %in.vec3, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
12434 %3 = shufflevector <64 x i32> %in.vec4, <64 x i32> %in.vec5, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
12435 %4 = shufflevector <128 x i32> %1, <128 x i32> %2, <256 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255>
12436 %5 = shufflevector <64 x i32> %in.vec6, <64 x i32> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
12437 %6 = shufflevector <128 x i32> %3, <128 x i32> %5, <192 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191>
12438 %7 = shufflevector <192 x i32> %6, <192 x i32> poison, <256 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
12439 %8 = shufflevector <256 x i32> %4, <256 x i32> %7, <448 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255, i32 256, i32 257, i32 258, i32 259, i32 260, i32 261, i32 262, i32 263, i32 264, i32 265, i32 266, i32 267, i32 268, i32 269, i32 270, i32 271, i32 272, i32 273, i32 274, i32 275, i32 276, i32 277, i32 278, i32 279, i32 280, i32 281, i32 282, i32 283, i32 284, i32 285, i32 286, i32 287, i32 288, i32 289, i32 290, i32 291, i32 292, i32 293, i32 294, i32 295, i32 296, i32 297, i32 298, i32 299, i32 300, i32 301, i32 302, i32 303, i32 304, i32 305, i32 306, i32 307, i32 308, i32 309, i32 310, i32 311, i32 312, i32 313, i32 314, i32 315, i32 316, i32 317, i32 318, i32 319, i32 320, i32 321, i32 322, i32 323, i32 324, i32 325, i32 326, i32 327, i32 328, i32 329, i32 330, i32 331, i32 332, i32 333, i32 334, i32 335, i32 336, i32 337, i32 338, i32 339, i32 340, i32 341, i32 342, i32 343, i32 344, i32 345, i32 346, i32 347, i32 348, i32 349, i32 350, i32 351, i32 352, i32 353, i32 354, i32 355, i32 356, i32 357, i32 358, i32 359, i32 360, i32 361, i32 362, i32 363, i32 364, i32 365, i32 366, i32 367, i32 368, i32 369, i32 370, i32 371, i32 372, i32 373, i32 374, i32 375, i32 376, i32 377, i32 378, i32 379, i32 380, i32 381, i32 382, i32 383, i32 384, i32 385, i32 386, i32 387, i32 388, i32 389, i32 390, i32 391, i32 392, i32 393, i32 394, i32 395, i32 396, i32 397, i32 398, i32 399, i32 400, i32 401, i32 402, i32 403, i32 404, i32 405, i32 406, i32 407, i32 408, i32 409, i32 410, i32 411, i32 412, i32 413, i32 414, i32 415, i32 416, i32 417, i32 418, i32 419, i32 420, i32 421, i32 422, i32 423, i32 424, i32 425, i32 426, i32 427, i32 428, i32 429, i32 430, i32 431, i32 432, i32 433, i32 434, i32 435, i32 436, i32 437, i32 438, i32 439, i32 440, i32 441, i32 442, i32 443, i32 444, i32 445, i32 446, i32 447>
12440 %interleaved.vec = shufflevector <448 x i32> %8, <448 x i32> poison, <448 x i32> <i32 0, i32 64, i32 128, i32 192, i32 256, i32 320, i32 384, i32 1, i32 65, i32 129, i32 193, i32 257, i32 321, i32 385, i32 2, i32 66, i32 130, i32 194, i32 258, i32 322, i32 386, i32 3, i32 67, i32 131, i32 195, i32 259, i32 323, i32 387, i32 4, i32 68, i32 132, i32 196, i32 260, i32 324, i32 388, i32 5, i32 69, i32 133, i32 197, i32 261, i32 325, i32 389, i32 6, i32 70, i32 134, i32 198, i32 262, i32 326, i32 390, i32 7, i32 71, i32 135, i32 199, i32 263, i32 327, i32 391, i32 8, i32 72, i32 136, i32 200, i32 264, i32 328, i32 392, i32 9, i32 73, i32 137, i32 201, i32 265, i32 329, i32 393, i32 10, i32 74, i32 138, i32 202, i32 266, i32 330, i32 394, i32 11, i32 75, i32 139, i32 203, i32 267, i32 331, i32 395, i32 12, i32 76, i32 140, i32 204, i32 268, i32 332, i32 396, i32 13, i32 77, i32 141, i32 205, i32 269, i32 333, i32 397, i32 14, i32 78, i32 142, i32 206, i32 270, i32 334, i32 398, i32 15, i32 79, i32 143, i32 207, i32 271, i32 335, i32 399, i32 16, i32 80, i32 144, i32 208, i32 272, i32 336, i32 400, i32 17, i32 81, i32 145, i32 209, i32 273, i32 337, i32 401, i32 18, i32 82, i32 146, i32 210, i32 274, i32 338, i32 402, i32 19, i32 83, i32 147, i32 211, i32 275, i32 339, i32 403, i32 20, i32 84, i32 148, i32 212, i32 276, i32 340, i32 404, i32 21, i32 85, i32 149, i32 213, i32 277, i32 341, i32 405, i32 22, i32 86, i32 150, i32 214, i32 278, i32 342, i32 406, i32 23, i32 87, i32 151, i32 215, i32 279, i32 343, i32 407, i32 24, i32 88, i32 152, i32 216, i32 280, i32 344, i32 408, i32 25, i32 89, i32 153, i32 217, i32 281, i32 345, i32 409, i32 26, i32 90, i32 154, i32 218, i32 282, i32 346, i32 410, i32 27, i32 91, i32 155, i32 219, i32 283, i32 347, i32 411, i32 28, i32 92, i32 156, i32 220, i32 284, i32 348, i32 412, i32 29, i32 93, i32 157, i32 221, i32 285, i32 349, i32 413, i32 30, i32 94, i32 158, i32 222, i32 286, i32 350, i32 414, i32 31, i32 95, i32 159, i32 223, i32 287, i32 351, i32 415, i32 32, i32 96, i32 160, i32 224, i32 288, i32 352, i32 416, i32 33, i32 97, i32 161, i32 225, i32 289, i32 353, i32 417, i32 34, i32 98, i32 162, i32 226, i32 290, i32 354, i32 418, i32 35, i32 99, i32 163, i32 227, i32 291, i32 355, i32 419, i32 36, i32 100, i32 164, i32 228, i32 292, i32 356, i32 420, i32 37, i32 101, i32 165, i32 229, i32 293, i32 357, i32 421, i32 38, i32 102, i32 166, i32 230, i32 294, i32 358, i32 422, i32 39, i32 103, i32 167, i32 231, i32 295, i32 359, i32 423, i32 40, i32 104, i32 168, i32 232, i32 296, i32 360, i32 424, i32 41, i32 105, i32 169, i32 233, i32 297, i32 361, i32 425, i32 42, i32 106, i32 170, i32 234, i32 298, i32 362, i32 426, i32 43, i32 107, i32 171, i32 235, i32 299, i32 363, i32 427, i32 44, i32 108, i32 172, i32 236, i32 300, i32 364, i32 428, i32 45, i32 109, i32 173, i32 237, i32 301, i32 365, i32 429, i32 46, i32 110, i32 174, i32 238, i32 302, i32 366, i32 430, i32 47, i32 111, i32 175, i32 239, i32 303, i32 367, i32 431, i32 48, i32 112, i32 176, i32 240, i32 304, i32 368, i32 432, i32 49, i32 113, i32 177, i32 241, i32 305, i32 369, i32 433, i32 50, i32 114, i32 178, i32 242, i32 306, i32 370, i32 434, i32 51, i32 115, i32 179, i32 243, i32 307, i32 371, i32 435, i32 52, i32 116, i32 180, i32 244, i32 308, i32 372, i32 436, i32 53, i32 117, i32 181, i32 245, i32 309, i32 373, i32 437, i32 54, i32 118, i32 182, i32 246, i32 310, i32 374, i32 438, i32 55, i32 119, i32 183, i32 247, i32 311, i32 375, i32 439, i32 56, i32 120, i32 184, i32 248, i32 312, i32 376, i32 440, i32 57, i32 121, i32 185, i32 249, i32 313, i32 377, i32 441, i32 58, i32 122, i32 186, i32 250, i32 314, i32 378, i32 442, i32 59, i32 123, i32 187, i32 251, i32 315, i32 379, i32 443, i32 60, i32 124, i32 188, i32 252, i32 316, i32 380, i32 444, i32 61, i32 125, i32 189, i32 253, i32 317, i32 381, i32 445, i32 62, i32 126, i32 190, i32 254, i32 318, i32 382, i32 446, i32 63, i32 127, i32 191, i32 255, i32 319, i32 383, i32 447>
12441 store <448 x i32> %interleaved.vec, ptr %out.vec, align 64
12442 ret void
12443 }
12444 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
12448 ; AVX2-ONLY: {{.*}}
12449 ; AVX512BW-FAST: {{.*}}
12450 ; AVX512BW-ONLY-FAST: {{.*}}
12451 ; AVX512BW-ONLY-SLOW: {{.*}}
12452 ; AVX512BW-SLOW: {{.*}}
12453 ; AVX512DQ-FAST: {{.*}}
12454 ; AVX512DQ-SLOW: {{.*}}
12455 ; AVX512DQBW-FAST: {{.*}}
12456 ; AVX512DQBW-SLOW: {{.*}}
12457 ; AVX512F-FAST: {{.*}}
12458 ; AVX512F-ONLY-FAST: {{.*}}
12459 ; AVX512F-ONLY-SLOW: {{.*}}
12460 ; AVX512F-SLOW: {{.*}}
12461 ; FALLBACK0: {{.*}}
12462 ; FALLBACK1: {{.*}}
12463 ; FALLBACK10: {{.*}}
12464 ; FALLBACK11: {{.*}}
12465 ; FALLBACK12: {{.*}}
12466 ; FALLBACK2: {{.*}}
12467 ; FALLBACK3: {{.*}}
12468 ; FALLBACK4: {{.*}}
12469 ; FALLBACK5: {{.*}}
12470 ; FALLBACK6: {{.*}}
12471 ; FALLBACK7: {{.*}}
12472 ; FALLBACK8: {{.*}}
12473 ; FALLBACK9: {{.*}}