1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by LoopVectorizer for interleaved stores.
18 define void @store_i32_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
19 ; SSE-LABEL: store_i32_stride7_vf2:
21 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
22 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10
23 ; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
24 ; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
25 ; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
26 ; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
27 ; SSE-NEXT: movq {{.*#+}} xmm4 = mem[0],zero
28 ; SSE-NEXT: movsd {{.*#+}} xmm5 = mem[0],zero
29 ; SSE-NEXT: movsd {{.*#+}} xmm6 = mem[0],zero
30 ; SSE-NEXT: movaps %xmm6, %xmm7
31 ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm5[0]
32 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,1,1]
33 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[1,3]
34 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
35 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
36 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
37 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
38 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm3[1,1]
39 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[2,0]
40 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,3,3,3]
41 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm4[0,2]
42 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
43 ; SSE-NEXT: movaps %xmm1, 32(%rax)
44 ; SSE-NEXT: movaps %xmm7, 16(%rax)
45 ; SSE-NEXT: movaps %xmm0, (%rax)
46 ; SSE-NEXT: movq %xmm2, 48(%rax)
49 ; AVX1-ONLY-LABEL: store_i32_stride7_vf2:
51 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
52 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r10
53 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
54 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
55 ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
56 ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
57 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
58 ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
59 ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm5 = mem[0],zero
60 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm5[0],xmm4[0]
61 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
62 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
63 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
64 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
65 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm1[3,0],ymm0[1,0],ymm1[7,4],ymm0[5,4]
66 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,2],ymm0[2,1],ymm6[4,6],ymm0[6,5]
67 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm2[12,13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10,11]
68 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,2],xmm4[3,3]
69 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
70 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[0,3],ymm1[4,6],ymm0[4,7]
71 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm1
72 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,u,0,2,u,u,u,5]
73 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm1[2,3],ymm2[4,6],ymm1[6,7]
74 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6],ymm1[7]
75 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
76 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm6, %xmm0
77 ; AVX1-ONLY-NEXT: vmovlps %xmm0, 48(%rax)
78 ; AVX1-ONLY-NEXT: vmovaps %xmm5, 32(%rax)
79 ; AVX1-ONLY-NEXT: vzeroupper
80 ; AVX1-ONLY-NEXT: retq
82 ; AVX2-SLOW-LABEL: store_i32_stride7_vf2:
84 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
85 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %r10
86 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
87 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
88 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
89 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
90 ; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
91 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
92 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
93 ; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
94 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
95 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
96 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
97 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
98 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
99 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,1,4,6,6,5]
100 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,1]
101 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm0[0,2,2,3,4,6,6,7]
102 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,2]
103 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6],ymm1[7]
104 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} xmm3 = <3,5,7,u>
105 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm3, %ymm2
106 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [3,5,0,1,3,5,0,1]
107 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,1,0,1]
108 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm3, %ymm0
109 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[3]
110 ; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
111 ; AVX2-SLOW-NEXT: vmovlps %xmm0, 48(%rax)
112 ; AVX2-SLOW-NEXT: vmovaps %xmm2, 32(%rax)
113 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rax)
114 ; AVX2-SLOW-NEXT: vzeroupper
115 ; AVX2-SLOW-NEXT: retq
117 ; AVX2-FAST-LABEL: store_i32_stride7_vf2:
118 ; AVX2-FAST: # %bb.0:
119 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
120 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %r10
121 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
122 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
123 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
124 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
125 ; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
126 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
127 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
128 ; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
129 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
130 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
131 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
132 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
133 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
134 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} xmm1 = <3,5,7,u>
135 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm1
136 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [3,5,0,1,3,5,0,1]
137 ; AVX2-FAST-NEXT: # ymm3 = mem[0,1,0,1]
138 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm3, %ymm3
139 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[3]
140 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm4 = <0,2,4,6,u,u,u,1>
141 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm4, %ymm2
142 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,2,4,0,0,2,4,0]
143 ; AVX2-FAST-NEXT: # ymm4 = mem[0,1,0,1]
144 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm4, %ymm0
145 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6],ymm2[7]
146 ; AVX2-FAST-NEXT: vextractf128 $1, %ymm3, %xmm2
147 ; AVX2-FAST-NEXT: vmovlps %xmm2, 48(%rax)
148 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
149 ; AVX2-FAST-NEXT: vmovaps %xmm1, 32(%rax)
150 ; AVX2-FAST-NEXT: vzeroupper
151 ; AVX2-FAST-NEXT: retq
153 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf2:
154 ; AVX2-FAST-PERLANE: # %bb.0:
155 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
156 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %r10
157 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
158 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
159 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
160 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
161 ; AVX2-FAST-PERLANE-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
162 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
163 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
164 ; AVX2-FAST-PERLANE-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
165 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
166 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
167 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
168 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
169 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
170 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,2,1,4,6,6,5]
171 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,1]
172 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm0[0,2,2,3,4,6,6,7]
173 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,2]
174 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6],ymm1[7]
175 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} xmm3 = <3,5,7,u>
176 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm3, %ymm2
177 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [3,5,0,1,3,5,0,1]
178 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,1,0,1]
179 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm3, %ymm0
180 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[3]
181 ; AVX2-FAST-PERLANE-NEXT: vextractf128 $1, %ymm0, %xmm0
182 ; AVX2-FAST-PERLANE-NEXT: vmovlps %xmm0, 48(%rax)
183 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, 32(%rax)
184 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rax)
185 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
186 ; AVX2-FAST-PERLANE-NEXT: retq
188 ; AVX512F-SLOW-LABEL: store_i32_stride7_vf2:
189 ; AVX512F-SLOW: # %bb.0:
190 ; AVX512F-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
191 ; AVX512F-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %r10
192 ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
193 ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
194 ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
195 ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
196 ; AVX512F-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
197 ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
198 ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
199 ; AVX512F-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
200 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
201 ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
202 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
203 ; AVX512F-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
204 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,2,4,6,16,20,18,1,3,5,7,17,21,19,u,u>
205 ; AVX512F-SLOW-NEXT: vpermi2d %zmm0, %zmm2, %zmm1
206 ; AVX512F-SLOW-NEXT: vextracti32x4 $2, %zmm1, 32(%rax)
207 ; AVX512F-SLOW-NEXT: vextracti32x4 $3, %zmm1, %xmm0
208 ; AVX512F-SLOW-NEXT: vmovq %xmm0, 48(%rax)
209 ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, (%rax)
210 ; AVX512F-SLOW-NEXT: vzeroupper
211 ; AVX512F-SLOW-NEXT: retq
213 ; AVX512F-FAST-LABEL: store_i32_stride7_vf2:
214 ; AVX512F-FAST: # %bb.0:
215 ; AVX512F-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
216 ; AVX512F-FAST-NEXT: movq {{[0-9]+}}(%rsp), %r10
217 ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
218 ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
219 ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
220 ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
221 ; AVX512F-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
222 ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
223 ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
224 ; AVX512F-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
225 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
226 ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
227 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
228 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
229 ; AVX512F-FAST-NEXT: vpermi2q %ymm3, %ymm0, %ymm1
230 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm0
231 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,2,4,6,8,10,12,1,3,5,7,9,11,13,u,u>
232 ; AVX512F-FAST-NEXT: vpermd %zmm0, %zmm1, %zmm0
233 ; AVX512F-FAST-NEXT: vextracti32x4 $2, %zmm0, 32(%rax)
234 ; AVX512F-FAST-NEXT: vextracti32x4 $3, %zmm0, %xmm1
235 ; AVX512F-FAST-NEXT: vmovq %xmm1, 48(%rax)
236 ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rax)
237 ; AVX512F-FAST-NEXT: vzeroupper
238 ; AVX512F-FAST-NEXT: retq
240 ; AVX512BW-SLOW-LABEL: store_i32_stride7_vf2:
241 ; AVX512BW-SLOW: # %bb.0:
242 ; AVX512BW-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
243 ; AVX512BW-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %r10
244 ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
245 ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
246 ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
247 ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
248 ; AVX512BW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
249 ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
250 ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
251 ; AVX512BW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
252 ; AVX512BW-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
253 ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
254 ; AVX512BW-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
255 ; AVX512BW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
256 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,2,4,6,16,20,18,1,3,5,7,17,21,19,u,u>
257 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm0, %zmm2, %zmm1
258 ; AVX512BW-SLOW-NEXT: vextracti32x4 $2, %zmm1, 32(%rax)
259 ; AVX512BW-SLOW-NEXT: vextracti32x4 $3, %zmm1, %xmm0
260 ; AVX512BW-SLOW-NEXT: vmovq %xmm0, 48(%rax)
261 ; AVX512BW-SLOW-NEXT: vmovdqa %ymm1, (%rax)
262 ; AVX512BW-SLOW-NEXT: vzeroupper
263 ; AVX512BW-SLOW-NEXT: retq
265 ; AVX512BW-FAST-LABEL: store_i32_stride7_vf2:
266 ; AVX512BW-FAST: # %bb.0:
267 ; AVX512BW-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
268 ; AVX512BW-FAST-NEXT: movq {{[0-9]+}}(%rsp), %r10
269 ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
270 ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
271 ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
272 ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
273 ; AVX512BW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
274 ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
275 ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm4 = mem[0],zero
276 ; AVX512BW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
277 ; AVX512BW-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
278 ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
279 ; AVX512BW-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
280 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
281 ; AVX512BW-FAST-NEXT: vpermi2q %ymm3, %ymm0, %ymm1
282 ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm0
283 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,2,4,6,8,10,12,1,3,5,7,9,11,13,u,u>
284 ; AVX512BW-FAST-NEXT: vpermd %zmm0, %zmm1, %zmm0
285 ; AVX512BW-FAST-NEXT: vextracti32x4 $2, %zmm0, 32(%rax)
286 ; AVX512BW-FAST-NEXT: vextracti32x4 $3, %zmm0, %xmm1
287 ; AVX512BW-FAST-NEXT: vmovq %xmm1, 48(%rax)
288 ; AVX512BW-FAST-NEXT: vmovdqa %ymm0, (%rax)
289 ; AVX512BW-FAST-NEXT: vzeroupper
290 ; AVX512BW-FAST-NEXT: retq
291 %in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64
292 %in.vec1 = load <2 x i32>, ptr %in.vecptr1, align 64
293 %in.vec2 = load <2 x i32>, ptr %in.vecptr2, align 64
294 %in.vec3 = load <2 x i32>, ptr %in.vecptr3, align 64
295 %in.vec4 = load <2 x i32>, ptr %in.vecptr4, align 64
296 %in.vec5 = load <2 x i32>, ptr %in.vecptr5, align 64
297 %in.vec6 = load <2 x i32>, ptr %in.vecptr6, align 64
298 %1 = shufflevector <2 x i32> %in.vec0, <2 x i32> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
299 %2 = shufflevector <2 x i32> %in.vec2, <2 x i32> %in.vec3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
300 %3 = shufflevector <2 x i32> %in.vec4, <2 x i32> %in.vec5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
301 %4 = shufflevector <4 x i32> %1, <4 x i32> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
302 %5 = shufflevector <2 x i32> %in.vec6, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
303 %6 = shufflevector <4 x i32> %3, <4 x i32> %5, <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
304 %7 = shufflevector <6 x i32> %6, <6 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 undef, i32 undef>
305 %8 = shufflevector <8 x i32> %4, <8 x i32> %7, <14 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13>
306 %interleaved.vec = shufflevector <14 x i32> %8, <14 x i32> poison, <14 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13>
307 store <14 x i32> %interleaved.vec, ptr %out.vec, align 64
311 define void @store_i32_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
312 ; SSE-LABEL: store_i32_stride7_vf4:
314 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
315 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10
316 ; SSE-NEXT: movaps (%rdi), %xmm0
317 ; SSE-NEXT: movaps (%rsi), %xmm6
318 ; SSE-NEXT: movaps (%rdx), %xmm5
319 ; SSE-NEXT: movaps (%rcx), %xmm1
320 ; SSE-NEXT: movaps (%r8), %xmm4
321 ; SSE-NEXT: movaps (%r9), %xmm2
322 ; SSE-NEXT: movaps (%r10), %xmm8
323 ; SSE-NEXT: movaps %xmm5, %xmm7
324 ; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
325 ; SSE-NEXT: movaps %xmm0, %xmm3
326 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
327 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm7[0]
328 ; SSE-NEXT: movaps %xmm4, %xmm9
329 ; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm2[2],xmm9[3],xmm2[3]
330 ; SSE-NEXT: movaps %xmm5, %xmm7
331 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
332 ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm9[0]
333 ; SSE-NEXT: movaps %xmm8, %xmm9
334 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,3],xmm2[3,3]
335 ; SSE-NEXT: movaps %xmm4, %xmm10
336 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,1],xmm1[1,1]
337 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
338 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm9[2,0]
339 ; SSE-NEXT: movaps %xmm0, %xmm9
340 ; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm6[2],xmm9[3],xmm6[3]
341 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
342 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
343 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,3],xmm9[0,1]
344 ; SSE-NEXT: movaps %xmm6, %xmm9
345 ; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
346 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,3],xmm10[2,0]
347 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm8[2,3,2,3]
348 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[1,3]
349 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm8[0,2]
350 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm6[3,3]
351 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm5[2,0]
352 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm10[0],xmm0[1,2,3]
353 ; SSE-NEXT: movaps %xmm4, 16(%rax)
354 ; SSE-NEXT: movaps %xmm9, 32(%rax)
355 ; SSE-NEXT: movaps %xmm2, 48(%rax)
356 ; SSE-NEXT: movaps %xmm1, 96(%rax)
357 ; SSE-NEXT: movaps %xmm7, 64(%rax)
358 ; SSE-NEXT: movaps %xmm3, (%rax)
359 ; SSE-NEXT: movaps %xmm0, 80(%rax)
362 ; AVX1-ONLY-LABEL: store_i32_stride7_vf4:
363 ; AVX1-ONLY: # %bb.0:
364 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
365 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r10
366 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm5
367 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm6
368 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm3
369 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm4
370 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm1
371 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm2
372 ; AVX1-ONLY-NEXT: vmovaps (%r10), %xmm0
373 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm7
374 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm8
375 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm9
376 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
377 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0],ymm8[2,1],ymm10[6,4],ymm8[6,5]
378 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
379 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm7[1],ymm5[1],ymm7[3],ymm5[3]
380 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm5[1,1],ymm6[2,0],ymm5[5,5],ymm6[6,4]
381 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm3[1,1],xmm4[1,1]
382 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm11[1,2],ymm6[3,4,5,6,7]
383 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm10[3,4,5],ymm6[6,7]
384 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm5[3,3],ymm7[3,3],ymm5[7,7],ymm7[7,7]
385 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm11
386 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm11 = ymm3[2],ymm11[2],ymm3[3],ymm11[3],ymm3[6],ymm11[6],ymm3[7],ymm11[7]
387 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3,4],ymm10[5,6],ymm11[7]
388 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm2[1],xmm1[1]
389 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,1],ymm11[2,0],ymm9[6,5],ymm11[6,4]
390 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3,4],ymm10[5,6,7]
391 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm10
392 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[4],ymm8[4],ymm10[5],ymm8[5]
393 ; AVX1-ONLY-NEXT: vbroadcastss (%r10), %ymm10
394 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
395 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[4],ymm5[4],ymm7[5],ymm5[5]
396 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm4[0,0],xmm3[0,0]
397 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,1,2,0]
398 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3],ymm5[4,5,6,7]
399 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6],ymm3[7]
400 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3]
401 ; AVX1-ONLY-NEXT: vbroadcastss 12(%rcx), %xmm2
402 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
403 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
404 ; AVX1-ONLY-NEXT: vmovaps %xmm0, 96(%rax)
405 ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rax)
406 ; AVX1-ONLY-NEXT: vmovaps %ymm9, 64(%rax)
407 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 32(%rax)
408 ; AVX1-ONLY-NEXT: vzeroupper
409 ; AVX1-ONLY-NEXT: retq
411 ; AVX2-SLOW-LABEL: store_i32_stride7_vf4:
412 ; AVX2-SLOW: # %bb.0:
413 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
414 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %r10
415 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm4
416 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm5
417 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm0
418 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm2
419 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm3
420 ; AVX2-SLOW-NEXT: vmovaps (%r10), %xmm1
421 ; AVX2-SLOW-NEXT: vinsertf128 $1, (%rsi), %ymm4, %ymm4
422 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm5, %ymm6
423 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm7
424 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm8 = [2,6,0,3,2,6,0,3]
425 ; AVX2-SLOW-NEXT: # ymm8 = mem[0,1,0,1]
426 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm8, %ymm8
427 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm9 = ymm4[3,3,3,3,7,7,7,7]
428 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,2]
429 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6],ymm8[7]
430 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm9
431 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm9 = ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[6],ymm3[6],ymm9[7],ymm3[7]
432 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,1,0,1,4,5,4,5]
433 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3,4],ymm8[5,6,7]
434 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm9
435 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm7[1,1,1,1,5,5,5,5]
436 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
437 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm5[1],xmm0[1],zero
438 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm10 = [5,0,2,6,5,0,2,6]
439 ; AVX2-SLOW-NEXT: # ymm10 = mem[0,1,0,1]
440 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm10, %ymm10
441 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm10[0],ymm5[1,2],ymm10[3,4,5,6,7]
442 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm9[3,4,5],ymm5[6,7]
443 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm9 = [0,4,0,4]
444 ; AVX2-SLOW-NEXT: # xmm9 = mem[0,0]
445 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm9, %ymm6
446 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [0,4,0,1,0,4,0,1]
447 ; AVX2-SLOW-NEXT: # ymm9 = mem[0,1,0,1]
448 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm9, %ymm4
449 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3],ymm4[4,5,6,7]
450 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4]
451 ; AVX2-SLOW-NEXT: vpermps %ymm7, %ymm6, %ymm6
452 ; AVX2-SLOW-NEXT: vbroadcastss (%r10), %ymm7
453 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
454 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6],ymm4[7]
455 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
456 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[3],xmm2[1,2],zero
457 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
458 ; AVX2-SLOW-NEXT: vmovaps %xmm0, 96(%rax)
459 ; AVX2-SLOW-NEXT: vmovaps %ymm4, (%rax)
460 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 32(%rax)
461 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 64(%rax)
462 ; AVX2-SLOW-NEXT: vzeroupper
463 ; AVX2-SLOW-NEXT: retq
465 ; AVX2-FAST-LABEL: store_i32_stride7_vf4:
466 ; AVX2-FAST: # %bb.0:
467 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
468 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %r10
469 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm2
470 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm3
471 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm1
472 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm4
473 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm5
474 ; AVX2-FAST-NEXT: vmovaps (%r10), %xmm0
475 ; AVX2-FAST-NEXT: vinsertf128 $1, (%rsi), %ymm2, %ymm2
476 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm3
477 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm6
478 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm7
479 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm8 = ymm6[1,1,1,1,5,5,5,5]
480 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5],ymm8[6,7]
481 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm8 = [5,1,5,1,5,1,5,1]
482 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm8, %ymm8
483 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [5,0,2,6,5,0,2,6]
484 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,0,1]
485 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm9, %ymm9
486 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1,2],ymm9[3,4,5,6,7]
487 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5],ymm8[6,7]
488 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm8 = [7,3,7,3,7,3,7,3]
489 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm8, %ymm8
490 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [2,6,0,3,2,6,0,3]
491 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,0,1]
492 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm9, %ymm9
493 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6],ymm9[7]
494 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm4
495 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
496 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,0,1,4,5,4,5]
497 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2,3,4],ymm8[5,6,7]
498 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm5 = [0,4,0,4]
499 ; AVX2-FAST-NEXT: # xmm5 = mem[0,0]
500 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm5, %ymm3
501 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [0,4,0,1,0,4,0,1]
502 ; AVX2-FAST-NEXT: # ymm5 = mem[0,1,0,1]
503 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm5, %ymm2
504 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
505 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm3 = [0,4,0,4,0,4,0,4]
506 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm3, %ymm3
507 ; AVX2-FAST-NEXT: vbroadcastss (%r10), %ymm5
508 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
509 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6],ymm2[7]
510 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm3 = [7,3,7,3,7,3,7,3]
511 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm3, %ymm3
512 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
513 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3]
514 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rax)
515 ; AVX2-FAST-NEXT: vmovaps %ymm4, 64(%rax)
516 ; AVX2-FAST-NEXT: vmovaps %ymm7, 32(%rax)
517 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
518 ; AVX2-FAST-NEXT: vmovaps %xmm0, 96(%rax)
519 ; AVX2-FAST-NEXT: vzeroupper
520 ; AVX2-FAST-NEXT: retq
522 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf4:
523 ; AVX2-FAST-PERLANE: # %bb.0:
524 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
525 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %r10
526 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm4
527 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm5
528 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm0
529 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm2
530 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm3
531 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r10), %xmm1
532 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, (%rsi), %ymm4, %ymm4
533 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm5, %ymm6
534 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm7
535 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm8 = [2,6,0,3,2,6,0,3]
536 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0,1,0,1]
537 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm8, %ymm8
538 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm9 = ymm4[3,3,3,3,7,7,7,7]
539 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,2]
540 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6],ymm8[7]
541 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm9
542 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm9 = ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[6],ymm3[6],ymm9[7],ymm3[7]
543 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,1,0,1,4,5,4,5]
544 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3,4],ymm8[5,6,7]
545 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm9
546 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm7[1,1,1,1,5,5,5,5]
547 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
548 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm5[1],xmm0[1],zero
549 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm10 = [5,0,2,6,5,0,2,6]
550 ; AVX2-FAST-PERLANE-NEXT: # ymm10 = mem[0,1,0,1]
551 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm10, %ymm10
552 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm10[0],ymm5[1,2],ymm10[3,4,5,6,7]
553 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm9[3,4,5],ymm5[6,7]
554 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm9 = [0,4,0,4]
555 ; AVX2-FAST-PERLANE-NEXT: # xmm9 = mem[0,0]
556 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm9, %ymm6
557 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm9 = [0,4,0,1,0,4,0,1]
558 ; AVX2-FAST-PERLANE-NEXT: # ymm9 = mem[0,1,0,1]
559 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm9, %ymm4
560 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3],ymm4[4,5,6,7]
561 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4]
562 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm7, %ymm6, %ymm6
563 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss (%r10), %ymm7
564 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
565 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6],ymm4[7]
566 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
567 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[3],xmm2[1,2],zero
568 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
569 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, 96(%rax)
570 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, (%rax)
571 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 32(%rax)
572 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 64(%rax)
573 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
574 ; AVX2-FAST-PERLANE-NEXT: retq
576 ; AVX512-LABEL: store_i32_stride7_vf4:
578 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
579 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
580 ; AVX512-NEXT: vmovdqa (%rdi), %xmm0
581 ; AVX512-NEXT: vmovdqa (%rdx), %xmm1
582 ; AVX512-NEXT: vmovdqa (%r8), %xmm2
583 ; AVX512-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
584 ; AVX512-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
585 ; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
586 ; AVX512-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm1
587 ; AVX512-NEXT: vinserti32x4 $2, (%r10), %zmm1, %zmm1
588 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,4,8,12,16,20,24,1,5,9,13,17,21,25,2,6]
589 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
590 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <10,14,18,22,26,3,7,11,15,19,23,27,u,u,u,u>
591 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
592 ; AVX512-NEXT: vextracti32x4 $2, %zmm3, 96(%rax)
593 ; AVX512-NEXT: vmovdqa64 %zmm2, (%rax)
594 ; AVX512-NEXT: vmovdqa %ymm3, 64(%rax)
595 ; AVX512-NEXT: vzeroupper
597 %in.vec0 = load <4 x i32>, ptr %in.vecptr0, align 64
598 %in.vec1 = load <4 x i32>, ptr %in.vecptr1, align 64
599 %in.vec2 = load <4 x i32>, ptr %in.vecptr2, align 64
600 %in.vec3 = load <4 x i32>, ptr %in.vecptr3, align 64
601 %in.vec4 = load <4 x i32>, ptr %in.vecptr4, align 64
602 %in.vec5 = load <4 x i32>, ptr %in.vecptr5, align 64
603 %in.vec6 = load <4 x i32>, ptr %in.vecptr6, align 64
604 %1 = shufflevector <4 x i32> %in.vec0, <4 x i32> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
605 %2 = shufflevector <4 x i32> %in.vec2, <4 x i32> %in.vec3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
606 %3 = shufflevector <4 x i32> %in.vec4, <4 x i32> %in.vec5, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
607 %4 = shufflevector <8 x i32> %1, <8 x i32> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
608 %5 = shufflevector <4 x i32> %in.vec6, <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
609 %6 = shufflevector <8 x i32> %3, <8 x i32> %5, <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
610 %7 = shufflevector <12 x i32> %6, <12 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 undef, i32 undef, i32 undef, i32 undef>
611 %8 = shufflevector <16 x i32> %4, <16 x i32> %7, <28 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
612 %interleaved.vec = shufflevector <28 x i32> %8, <28 x i32> poison, <28 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27>
613 store <28 x i32> %interleaved.vec, ptr %out.vec, align 64
617 define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
618 ; SSE-LABEL: store_i32_stride7_vf8:
620 ; SSE-NEXT: subq $104, %rsp
621 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
622 ; SSE-NEXT: movdqa (%rdi), %xmm8
623 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
624 ; SSE-NEXT: movdqa 16(%rdi), %xmm14
625 ; SSE-NEXT: movdqa (%rsi), %xmm3
626 ; SSE-NEXT: movdqa 16(%rsi), %xmm5
627 ; SSE-NEXT: movdqa 16(%rdx), %xmm13
628 ; SSE-NEXT: movdqa 16(%rcx), %xmm9
629 ; SSE-NEXT: movdqa 16(%r8), %xmm11
630 ; SSE-NEXT: movdqa %xmm11, (%rsp) # 16-byte Spill
631 ; SSE-NEXT: movdqa (%r9), %xmm4
632 ; SSE-NEXT: movaps 16(%r9), %xmm1
633 ; SSE-NEXT: movdqa (%rax), %xmm7
634 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
635 ; SSE-NEXT: movaps 16(%rax), %xmm12
636 ; SSE-NEXT: movaps %xmm12, %xmm0
637 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
638 ; SSE-NEXT: movaps %xmm1, %xmm6
639 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
640 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm11[3,3,3,3]
641 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[3,3,3,3]
642 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
643 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
644 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
645 ; SSE-NEXT: movdqa %xmm11, %xmm0
646 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm9[1,1]
647 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[1,1,1,1]
648 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
649 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,1,1]
650 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
651 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
652 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
653 ; SSE-NEXT: movdqa %xmm14, %xmm0
654 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
655 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[1,1,1,1]
656 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,1,1]
657 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
658 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
659 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
660 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
661 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
662 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
663 ; SSE-NEXT: movdqa %xmm8, %xmm0
664 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
665 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
666 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
667 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
668 ; SSE-NEXT: movaps (%rdx), %xmm11
669 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,1,1]
670 ; SSE-NEXT: movaps %xmm11, %xmm1
671 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm5[0],xmm1[1,2,3]
672 ; SSE-NEXT: movaps (%rcx), %xmm3
673 ; SSE-NEXT: movaps (%r8), %xmm7
674 ; SSE-NEXT: movaps %xmm7, %xmm15
675 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,1],xmm3[1,1]
676 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm15[2,0]
677 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
678 ; SSE-NEXT: movdqa %xmm13, %xmm15
679 ; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm9[2],xmm15[3],xmm9[3]
680 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm13[0]
681 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
682 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
683 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[3,3],xmm10[3,3]
684 ; SSE-NEXT: movdqa %xmm14, %xmm1
685 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm12[0,3]
686 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,1],xmm14[3,3]
687 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,2],xmm13[2,0]
688 ; SSE-NEXT: movaps (%rsp), %xmm8 # 16-byte Reload
689 ; SSE-NEXT: movaps %xmm8, %xmm13
690 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
691 ; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm6[0],xmm13[1],xmm6[1]
692 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm1[2,0]
693 ; SSE-NEXT: movaps %xmm7, %xmm1
694 ; SSE-NEXT: movdqa %xmm4, %xmm9
695 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
696 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
697 ; SSE-NEXT: movaps %xmm11, %xmm2
698 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
699 ; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
700 ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
701 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
702 ; SSE-NEXT: movaps %xmm4, %xmm1
703 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
704 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm5[1,3]
705 ; SSE-NEXT: movaps %xmm7, %xmm0
706 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
707 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
708 ; SSE-NEXT: movaps %xmm11, %xmm1
709 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
710 ; SSE-NEXT: movaps %xmm5, %xmm3
711 ; SSE-NEXT: movaps %xmm5, %xmm9
712 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
713 ; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
714 ; SSE-NEXT: movlhps {{.*#+}} xmm9 = xmm9[0],xmm1[0]
715 ; SSE-NEXT: unpckhps {{.*#+}} xmm8 = xmm8[2],xmm6[2],xmm8[3],xmm6[3]
716 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm15 = xmm15[0],xmm8[0]
717 ; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
718 ; SSE-NEXT: shufps $36, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
719 ; SSE-NEXT: # xmm14 = xmm14[0,1],mem[2,0]
720 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[3,3],xmm5[3,3]
721 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm11[2,0]
722 ; SSE-NEXT: movaps %xmm3, %xmm6
723 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
724 ; SSE-NEXT: movss {{.*#+}} xmm6 = xmm1[0],xmm6[1,2,3]
725 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
726 ; SSE-NEXT: # xmm4 = xmm4[3,3],mem[3,3]
727 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm4[2,0]
728 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
729 ; SSE-NEXT: # xmm1 = mem[3,3,3,3]
730 ; SSE-NEXT: movss {{.*#+}} xmm7 = xmm1[0],xmm7[1,2,3]
731 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
732 ; SSE-NEXT: movaps %xmm14, 112(%rax)
733 ; SSE-NEXT: movdqa %xmm15, 176(%rax)
734 ; SSE-NEXT: movaps %xmm9, (%rax)
735 ; SSE-NEXT: movaps %xmm0, 16(%rax)
736 ; SSE-NEXT: movaps %xmm2, 64(%rax)
737 ; SSE-NEXT: movaps %xmm13, 128(%rax)
738 ; SSE-NEXT: movaps %xmm12, 192(%rax)
739 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
740 ; SSE-NEXT: movaps %xmm0, 32(%rax)
741 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
742 ; SSE-NEXT: movaps %xmm0, 48(%rax)
743 ; SSE-NEXT: movaps %xmm7, 96(%rax)
744 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
745 ; SSE-NEXT: movaps %xmm0, 160(%rax)
746 ; SSE-NEXT: movaps %xmm6, 80(%rax)
747 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
748 ; SSE-NEXT: movaps %xmm0, 144(%rax)
749 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
750 ; SSE-NEXT: movaps %xmm0, 208(%rax)
751 ; SSE-NEXT: addq $104, %rsp
754 ; AVX1-ONLY-LABEL: store_i32_stride7_vf8:
755 ; AVX1-ONLY: # %bb.0:
756 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
757 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm0
758 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm1
759 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm2
760 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm3
761 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm7
762 ; AVX1-ONLY-NEXT: vmovaps (%r9), %ymm8
763 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm2[1,1],ymm3[1,1],ymm2[5,5],ymm3[5,5]
764 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm1[1,1],ymm0[1,1],ymm1[5,5],ymm0[5,5]
765 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
766 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6],ymm5[7]
767 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
768 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0],ymm7[2,1],ymm5[6,4],ymm7[6,5]
769 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm6 = mem[2,3],ymm5[2,3]
770 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm5, %xmm5
771 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[3]
772 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3,4,5,6],ymm5[7]
773 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
774 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm5
775 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm6
776 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm6[1,1],xmm5[1,1]
777 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
778 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm11
779 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm13
780 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm13[1],xmm11[1]
781 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm11[1,1],xmm9[0,2]
782 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm9, %ymm9
783 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm15
784 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm14
785 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm14[1],xmm15[1],zero
786 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1,2],ymm9[3,4,5,6,7]
787 ; AVX1-ONLY-NEXT: vmovaps (%rax), %xmm10
788 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm12
789 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm12[5],ymm4[6,7]
790 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1,2],ymm4[3,4,5],ymm9[6,7]
791 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
792 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm4 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
793 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
794 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm12[0],ymm4[2],ymm12[2]
795 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm12 = xmm11[0],xmm13[0]
796 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm12[2,0],xmm13[2,1]
797 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm12, %ymm12
798 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm9 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
799 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm9[0,1,0,1]
800 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm12[0,1],ymm9[2,3],ymm12[4,5,6,7]
801 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm4[4,5,6],ymm9[7]
802 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
803 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
804 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,2,0,4,5,6,4]
805 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm9 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
806 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm9[2,3]
807 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm9 = ymm7[1],ymm8[1],ymm7[3],ymm8[3]
808 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm8[1,1],ymm9[0,2],ymm8[5,5],ymm9[4,6]
809 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
810 ; AVX1-ONLY-NEXT: vmovaps 16(%rax), %xmm12
811 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2,3,4,5,6,7]
812 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1],ymm4[2,3,4,5],ymm9[6,7]
813 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm3[3,3],ymm2[3,3],ymm3[7,7],ymm2[7,7]
814 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
815 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm0[3,3],ymm1[3,3],ymm0[7,7],ymm1[7,7]
816 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm12, %xmm12
817 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm12[0,1,2],ymm9[3,4,5,6,7]
818 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[3,3],ymm8[3,3],ymm7[7,7],ymm8[7,7]
819 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3,4],ymm7[5,6],ymm9[7]
820 ; AVX1-ONLY-NEXT: vbroadcastsd 24(%rax), %ymm8
821 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1,2,3,4,5,6],ymm8[7]
822 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm8 = xmm13[3,3],xmm11[3,3]
823 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
824 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm9 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
825 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm9, %ymm9
826 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6],ymm9[7]
827 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm6[2,2,2,2]
828 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm5[0,1,2],xmm9[3]
829 ; AVX1-ONLY-NEXT: vbroadcastsd 8(%rax), %ymm11
830 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
831 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3,4],ymm8[5,6,7]
832 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
833 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
834 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm3[3,1],ymm1[0,2],ymm3[7,5],ymm1[4,6]
835 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
836 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[3,3],xmm6[3,3]
837 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm10[3]
838 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
839 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
840 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax)
841 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 64(%rax)
842 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rax)
843 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
844 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
845 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 192(%rax)
846 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
847 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
848 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
849 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
850 ; AVX1-ONLY-NEXT: vzeroupper
851 ; AVX1-ONLY-NEXT: retq
853 ; AVX2-SLOW-LABEL: store_i32_stride7_vf8:
854 ; AVX2-SLOW: # %bb.0:
855 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
856 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
857 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm2
858 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm8
859 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %ymm3
860 ; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm6
861 ; AVX2-SLOW-NEXT: vmovaps (%r9), %ymm7
862 ; AVX2-SLOW-NEXT: vmovaps (%rax), %xmm1
863 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm9
864 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm4
865 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm5
866 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm10 = xmm5[1,1,1,1]
867 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
868 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm10, %ymm10
869 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
870 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm12
871 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm13
872 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm13[1],xmm12[1],zero
873 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm14
874 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %xmm15
875 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm11 = xmm15[1,1,2,2]
876 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm14[2],xmm11[3]
877 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
878 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1,2],ymm11[3,4,5,6,7]
879 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5],ymm10[6,7]
880 ; AVX2-SLOW-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
881 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm3[2],ymm8[3],ymm3[3],ymm8[6],ymm3[6],ymm8[7],ymm3[7]
882 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm11 = ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[6],ymm2[6],ymm0[7],ymm2[7]
883 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,2]
884 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
885 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm7[1,1,2,2,5,5,6,6]
886 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm6[2],ymm11[3,4,5],ymm6[6],ymm11[7]
887 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
888 ; AVX2-SLOW-NEXT: vmovaps 16(%rax), %xmm9
889 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4,5,6,7]
890 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3,4,5],ymm9[6,7]
891 ; AVX2-SLOW-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
892 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm9 = xmm14[3,3],xmm15[3,3]
893 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
894 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm11 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
895 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm11 = xmm11[0,1,2,2]
896 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
897 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1,2,3,4],ymm9[5,6],ymm11[7]
898 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm11 = xmm5[2,2,2,2]
899 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm11 = xmm4[0,1,2],xmm11[3]
900 ; AVX2-SLOW-NEXT: vbroadcastsd 8(%rax), %ymm10
901 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
902 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm9[0,1],ymm10[2,3,4],ymm9[5,6,7]
903 ; AVX2-SLOW-NEXT: vbroadcastss %xmm12, %xmm9
904 ; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm10
905 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
906 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm10 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
907 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,1,2,2]
908 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,2,1]
909 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5,6,7]
910 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm10 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
911 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
912 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm12
913 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm10[0],ymm12[0],ymm10[2],ymm12[2]
914 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm9[0,1,2,3],ymm10[4,5,6],ymm9[7]
915 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm9 = ymm8[1,1],ymm3[1,1],ymm8[5,5],ymm3[5,5]
916 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm2[1,1,1,1,5,5,5,5]
917 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm0[1],ymm10[2,3,4],ymm0[5],ymm10[6,7]
918 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,2]
919 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5,6],ymm10[7]
920 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm7[0,0,0,0,4,4,4,4]
921 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm13 = ymm6[0,1,0,1,4,5,4,5]
922 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0],ymm10[1],ymm13[2,3,4],ymm10[5],ymm13[6,7]
923 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,1,2,3]
924 ; AVX2-SLOW-NEXT: vbroadcastsd 16(%rax), %ymm13
925 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3],ymm10[4,5,6,7]
926 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6],ymm10[7]
927 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm8 = ymm3[2],ymm8[2],ymm3[3],ymm8[3],ymm3[6],ymm8[6],ymm3[7],ymm8[7]
928 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[3,3,3,3]
929 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm10 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
930 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[3,3,3,3]
931 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7]
932 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,3],ymm7[3,3],ymm6[7,7],ymm7[7,7]
933 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5,6],ymm8[7]
934 ; AVX2-SLOW-NEXT: vbroadcastsd 24(%rax), %ymm7
935 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1,2,3,4,5,6],ymm7[7]
936 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[4],ymm2[4],ymm0[5],ymm2[5]
937 ; AVX2-SLOW-NEXT: vbroadcastss 16(%rdx), %ymm2
938 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm3[3,1,2,0,7,5,6,4]
939 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6],ymm3[7]
940 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
941 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm4[3,3],xmm5[3,3]
942 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
943 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
944 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
945 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 96(%rax)
946 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 192(%rax)
947 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
948 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rax)
949 ; AVX2-SLOW-NEXT: vmovaps %ymm9, 128(%rax)
950 ; AVX2-SLOW-NEXT: vmovaps %ymm12, (%rax)
951 ; AVX2-SLOW-NEXT: vmovaps %ymm11, 64(%rax)
952 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
953 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
954 ; AVX2-SLOW-NEXT: vzeroupper
955 ; AVX2-SLOW-NEXT: retq
956 ;
957 ; AVX2-FAST-LABEL: store_i32_stride7_vf8:
958 ; AVX2-FAST: # %bb.0:
959 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
960 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm1
961 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm2
962 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm10
963 ; AVX2-FAST-NEXT: vmovaps (%rcx), %ymm3
964 ; AVX2-FAST-NEXT: vmovaps (%r8), %ymm7
965 ; AVX2-FAST-NEXT: vmovaps (%r9), %ymm8
966 ; AVX2-FAST-NEXT: vmovaps (%rax), %xmm0
967 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
968 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm6
969 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm4
970 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm5
971 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm9 = xmm5[1,1,1,1]
972 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0],xmm4[1],xmm9[2,3]
973 ; AVX2-FAST-NEXT: vbroadcastsd %xmm9, %ymm9
974 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3,4],ymm6[5],ymm9[6,7]
975 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm11
976 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm12
977 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm9 = zero,xmm12[1],xmm11[1],zero
978 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm13
979 ; AVX2-FAST-NEXT: vmovaps (%rsi), %xmm14
980 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm15 = xmm14[1,1,2,2]
981 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0,1],xmm13[2],xmm15[3]
982 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
983 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm15[0],ymm9[1,2],ymm15[3,4,5,6,7]
984 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2],ymm6[3,4,5],ymm9[6,7]
985 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
986 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm6 = ymm10[2],ymm3[2],ymm10[3],ymm3[3],ymm10[6],ymm3[6],ymm10[7],ymm3[7]
987 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm9 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
988 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,2,2,2]
989 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
990 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm8[1,1,2,2,5,5,6,6]
991 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm7[2],ymm9[3,4,5],ymm7[6],ymm9[7]
992 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,2,3]
993 ; AVX2-FAST-NEXT: vmovaps 16(%rax), %xmm15
994 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm15[1],ymm9[2,3,4,5,6,7]
995 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1],ymm6[2,3,4,5],ymm9[6,7]
996 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
997 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm2[1,1,1,1,5,5,5,5]
998 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm1[1],ymm6[2,3,4],ymm1[5],ymm6[6,7]
999 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,2,2,2]
1000 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm10[1,1],ymm3[1,1],ymm10[5,5],ymm3[5,5]
1001 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm15[5,6],ymm6[7]
1002 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm8[0,0,0,0,4,4,4,4]
1003 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm7[0,1,0,1,4,5,4,5]
1004 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm15[1],ymm9[2,3,4],ymm15[5],ymm9[6,7]
1005 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,2,3]
1006 ; AVX2-FAST-NEXT: vbroadcastsd 16(%rax), %ymm15
1007 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm15[2,3],ymm9[4,5,6,7]
1008 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2],ymm6[3,4,5,6],ymm9[7]
1009 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1010 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm6 = xmm13[3,3],xmm14[3,3]
1011 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
1012 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm9 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
1013 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,1,2,2,0,1,2,2]
1014 ; AVX2-FAST-NEXT: # ymm0 = mem[0,1,0,1]
1015 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm0, %ymm9
1016 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3,4],ymm6[5,6],ymm9[7]
1017 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm9 = xmm5[2,2,2,2]
1018 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm9 = xmm4[0,1,2],xmm9[3]
1019 ; AVX2-FAST-NEXT: vbroadcastsd 8(%rax), %ymm15
1020 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm15[4,5,6,7]
1021 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm9[2,3,4],ymm6[5,6,7]
1022 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm9 = ymm3[2],ymm10[2],ymm3[3],ymm10[3],ymm3[6],ymm10[6],ymm3[7],ymm10[7]
1023 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[3,3,3,3]
1024 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm10 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
1025 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[3,3,3,3]
1026 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7]
1027 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm7 = ymm7[3,3],ymm8[3,3],ymm7[7,7],ymm8[7,7]
1028 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3,4],ymm7[5,6],ymm9[7]
1029 ; AVX2-FAST-NEXT: vbroadcastsd 24(%rax), %ymm8
1030 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1,2,3,4,5,6],ymm8[7]
1031 ; AVX2-FAST-NEXT: vbroadcastss %xmm11, %xmm8
1032 ; AVX2-FAST-NEXT: vbroadcastss %xmm12, %xmm9
1033 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
1034 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm9 = xmm13[0],xmm14[0],xmm13[1],xmm14[1]
1035 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm0, %ymm0
1036 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3],ymm0[4,5,6,7]
1037 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm8 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
1038 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
1039 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
1040 ; AVX2-FAST-NEXT: vbroadcastsd %xmm10, %ymm9
1041 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm8[0],ymm9[0],ymm8[2],ymm9[2]
1042 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6],ymm0[7]
1043 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
1044 ; AVX2-FAST-NEXT: vbroadcastss 16(%rdx), %ymm2
1045 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm3[3,1,2,0,7,5,6,4]
1046 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6],ymm3[7]
1047 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
1048 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm4[3,3],xmm5[3,3]
1049 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm10[3]
1050 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4,5,6,7]
1051 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
1052 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%rax)
1053 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
1054 ; AVX2-FAST-NEXT: vmovaps %ymm7, 192(%rax)
1055 ; AVX2-FAST-NEXT: vmovaps %ymm6, 64(%rax)
1056 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1057 ; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rax)
1058 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1059 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rax)
1060 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1061 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
1062 ; AVX2-FAST-NEXT: vzeroupper
1063 ; AVX2-FAST-NEXT: retq
1064 ;
1065 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf8:
1066 ; AVX2-FAST-PERLANE: # %bb.0:
1067 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1068 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
1069 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm2
1070 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm8
1071 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %ymm3
1072 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm6
1073 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %ymm7
1074 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rax), %xmm1
1075 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm9
1076 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm4
1077 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm5
1078 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm10 = xmm5[1,1,1,1]
1079 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
1080 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm10, %ymm10
1081 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
1082 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm12
1083 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm13
1084 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm13[1],xmm12[1],zero
1085 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm14
1086 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %xmm15
1087 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm11 = xmm15[1,1,2,2]
1088 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm14[2],xmm11[3]
1089 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
1090 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1,2],ymm11[3,4,5,6,7]
1091 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5],ymm10[6,7]
1092 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1093 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm3[2],ymm8[3],ymm3[3],ymm8[6],ymm3[6],ymm8[7],ymm3[7]
1094 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm11 = ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[6],ymm2[6],ymm0[7],ymm2[7]
1095 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,2]
1096 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
1097 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm7[1,1,2,2,5,5,6,6]
1098 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm6[2],ymm11[3,4,5],ymm6[6],ymm11[7]
1099 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
1100 ; AVX2-FAST-PERLANE-NEXT: vmovaps 16(%rax), %xmm9
1101 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4,5,6,7]
1102 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3,4,5],ymm9[6,7]
1103 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1104 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm9 = xmm14[3,3],xmm15[3,3]
1105 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
1106 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm11 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
1107 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm11 = xmm11[0,1,2,2]
1108 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
1109 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1,2,3,4],ymm9[5,6],ymm11[7]
1110 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm11 = xmm5[2,2,2,2]
1111 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm11 = xmm4[0,1,2],xmm11[3]
1112 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 8(%rax), %ymm10
1113 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
1114 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm9[0,1],ymm10[2,3,4],ymm9[5,6,7]
1115 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm12, %xmm9
1116 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm10
1117 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
1118 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm10 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
1119 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,1,2,2]
1120 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,2,1]
1121 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5,6,7]
1122 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm10 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
1123 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
1124 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm12
1125 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm10[0],ymm12[0],ymm10[2],ymm12[2]
1126 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm9[0,1,2,3],ymm10[4,5,6],ymm9[7]
1127 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm9 = ymm8[1,1],ymm3[1,1],ymm8[5,5],ymm3[5,5]
1128 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm2[1,1,1,1,5,5,5,5]
1129 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm0[1],ymm10[2,3,4],ymm0[5],ymm10[6,7]
1130 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,2]
1131 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5,6],ymm10[7]
1132 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm7[0,0,0,0,4,4,4,4]
1133 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm13 = ymm6[0,1,0,1,4,5,4,5]
1134 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0],ymm10[1],ymm13[2,3,4],ymm10[5],ymm13[6,7]
1135 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,1,2,3]
1136 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 16(%rax), %ymm13
1137 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3],ymm10[4,5,6,7]
1138 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6],ymm10[7]
1139 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm8 = ymm3[2],ymm8[2],ymm3[3],ymm8[3],ymm3[6],ymm8[6],ymm3[7],ymm8[7]
1140 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[3,3,3,3]
1141 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm10 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
1142 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[3,3,3,3]
1143 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7]
1144 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,3],ymm7[3,3],ymm6[7,7],ymm7[7,7]
1145 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5,6],ymm8[7]
1146 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rax), %ymm7
1147 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1,2,3,4,5,6],ymm7[7]
1148 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[4],ymm2[4],ymm0[5],ymm2[5]
1149 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%rdx), %ymm2
1150 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm3[3,1,2,0,7,5,6,4]
1151 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6],ymm3[7]
1152 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
1153 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm4[3,3],xmm5[3,3]
1154 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
1155 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
1156 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1157 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 96(%rax)
1158 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 192(%rax)
1159 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1160 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rax)
1161 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 128(%rax)
1162 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm12, (%rax)
1163 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, 64(%rax)
1164 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1165 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
1166 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
1167 ; AVX2-FAST-PERLANE-NEXT: retq
1168 ;
1169 ; AVX512F-LABEL: store_i32_stride7_vf8:
1170 ; AVX512F: # %bb.0:
1171 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
1172 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %r10
1173 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
1174 ; AVX512F-NEXT: vmovdqa (%rdx), %ymm1
1175 ; AVX512F-NEXT: vmovdqa (%r8), %ymm2
1176 ; AVX512F-NEXT: vmovdqa (%r10), %ymm3
1177 ; AVX512F-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm4
1178 ; AVX512F-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm5
1179 ; AVX512F-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2
1180 ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [31,7,15,23,31,7,15,23]
1181 ; AVX512F-NEXT: # ymm0 = mem[0,1,0,1]
1182 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm4, %zmm0
1183 ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,23,31,7,6,23,31,7]
1184 ; AVX512F-NEXT: # ymm1 = mem[0,1,0,1]
1185 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
1186 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,u,0,8,16,u,u,u,u,1,9,17,u,u>
1187 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1188 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,8,16,24,u,u,u,1,9,17,25,u,u,u,2,10>
1189 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1190 ; AVX512F-NEXT: movw $14448, %cx # imm = 0x3870
1191 ; AVX512F-NEXT: kmovw %ecx, %k1
1192 ; AVX512F-NEXT: vmovdqa32 %zmm6, %zmm7 {%k1}
1193 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,2,10,18,u,u,u,u,3,11,19,u,u,u,u>
1194 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1195 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <2,10,u,u,u,19,27,3,11,u,u,u,20,28,4,12>
1196 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm5, %zmm8
1197 ; AVX512F-NEXT: movw $3612, %cx # imm = 0xE1C
1198 ; AVX512F-NEXT: kmovw %ecx, %k1
1199 ; AVX512F-NEXT: vmovdqa32 %zmm6, %zmm8 {%k1}
1200 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,5,13,21,29,u,u,u,6,14,22,30,u,u>
1201 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
1202 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = <4,12,20,u,u,u,u,5,13,21,u,u,u,u,6,14>
1203 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1204 ; AVX512F-NEXT: movw $15480, %cx # imm = 0x3C78
1205 ; AVX512F-NEXT: kmovw %ecx, %k1
1206 ; AVX512F-NEXT: vmovdqa32 %zmm6, %zmm4 {%k1}
1207 ; AVX512F-NEXT: vmovdqa64 %zmm4, 128(%rax)
1208 ; AVX512F-NEXT: vmovdqa64 %zmm8, 64(%rax)
1209 ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rax)
1210 ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
1211 ; AVX512F-NEXT: vmovdqa %ymm0, 192(%rax)
1212 ; AVX512F-NEXT: vzeroupper
1213 ; AVX512F-NEXT: retq
1214 ;
1215 ; AVX512BW-LABEL: store_i32_stride7_vf8:
1216 ; AVX512BW: # %bb.0:
1217 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1218 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
1219 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
1220 ; AVX512BW-NEXT: vmovdqa (%rdx), %ymm1
1221 ; AVX512BW-NEXT: vmovdqa (%r8), %ymm2
1222 ; AVX512BW-NEXT: vmovdqa (%r10), %ymm3
1223 ; AVX512BW-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm4
1224 ; AVX512BW-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm5
1225 ; AVX512BW-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2
1226 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [31,7,15,23,31,7,15,23]
1227 ; AVX512BW-NEXT: # ymm0 = mem[0,1,0,1]
1228 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm0
1229 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,23,31,7,6,23,31,7]
1230 ; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1]
1231 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm3, %zmm1
1232 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,u,0,8,16,u,u,u,u,1,9,17,u,u>
1233 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1234 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,8,16,24,u,u,u,1,9,17,25,u,u,u,2,10>
1235 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1236 ; AVX512BW-NEXT: movw $14448, %cx # imm = 0x3870
1237 ; AVX512BW-NEXT: kmovd %ecx, %k1
1238 ; AVX512BW-NEXT: vmovdqa32 %zmm6, %zmm7 {%k1}
1239 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,2,10,18,u,u,u,u,3,11,19,u,u,u,u>
1240 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1241 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <2,10,u,u,u,19,27,3,11,u,u,u,20,28,4,12>
1242 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm8
1243 ; AVX512BW-NEXT: movw $3612, %cx # imm = 0xE1C
1244 ; AVX512BW-NEXT: kmovd %ecx, %k1
1245 ; AVX512BW-NEXT: vmovdqa32 %zmm6, %zmm8 {%k1}
1246 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,5,13,21,29,u,u,u,6,14,22,30,u,u>
1247 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm6
1248 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = <4,12,20,u,u,u,u,5,13,21,u,u,u,u,6,14>
1249 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
1250 ; AVX512BW-NEXT: movw $15480, %cx # imm = 0x3C78
1251 ; AVX512BW-NEXT: kmovd %ecx, %k1
1252 ; AVX512BW-NEXT: vmovdqa32 %zmm6, %zmm4 {%k1}
1253 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 128(%rax)
1254 ; AVX512BW-NEXT: vmovdqa64 %zmm8, 64(%rax)
1255 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rax)
1256 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
1257 ; AVX512BW-NEXT: vmovdqa %ymm0, 192(%rax)
1258 ; AVX512BW-NEXT: vzeroupper
1259 ; AVX512BW-NEXT: retq
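; The seven 8 x i32 inputs are concatenated into one 56 x i32 vector and then interleaved with stride 7 (element 7*i+j comes from input j, lane i), so a single wide store writes the transposed layout checked above.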
1260 %in.vec0 = load <8 x i32>, ptr %in.vecptr0, align 64
1261 %in.vec1 = load <8 x i32>, ptr %in.vecptr1, align 64
1262 %in.vec2 = load <8 x i32>, ptr %in.vecptr2, align 64
1263 %in.vec3 = load <8 x i32>, ptr %in.vecptr3, align 64
1264 %in.vec4 = load <8 x i32>, ptr %in.vecptr4, align 64
1265 %in.vec5 = load <8 x i32>, ptr %in.vecptr5, align 64
1266 %in.vec6 = load <8 x i32>, ptr %in.vecptr6, align 64
1267 %1 = shufflevector <8 x i32> %in.vec0, <8 x i32> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1268 %2 = shufflevector <8 x i32> %in.vec2, <8 x i32> %in.vec3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1269 %3 = shufflevector <8 x i32> %in.vec4, <8 x i32> %in.vec5, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1270 %4 = shufflevector <16 x i32> %1, <16 x i32> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1271 %5 = shufflevector <8 x i32> %in.vec6, <8 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1272 %6 = shufflevector <16 x i32> %3, <16 x i32> %5, <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
1273 %7 = shufflevector <24 x i32> %6, <24 x i32> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1274 %8 = shufflevector <32 x i32> %4, <32 x i32> %7, <56 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55>
1275 %interleaved.vec = shufflevector <56 x i32> %8, <56 x i32> poison, <56 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 50, i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 51, i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 52, i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55>
1276 store <56 x i32> %interleaved.vec, ptr %out.vec, align 64
1277 ret void
1278 }
1280 define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
1281 ; SSE-LABEL: store_i32_stride7_vf16:
1282 ; SSE: # %bb.0:
1283 ; SSE-NEXT: subq $536, %rsp # imm = 0x218
1284 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1285 ; SSE-NEXT: movdqa (%rdi), %xmm3
1286 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1287 ; SSE-NEXT: movdqa (%rsi), %xmm7
1288 ; SSE-NEXT: movdqa 16(%rsi), %xmm9
1289 ; SSE-NEXT: movaps (%rdx), %xmm5
1290 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1291 ; SSE-NEXT: movdqa 16(%rdx), %xmm10
1292 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1293 ; SSE-NEXT: movaps (%rcx), %xmm14
1294 ; SSE-NEXT: movaps 16(%rcx), %xmm2
1295 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1296 ; SSE-NEXT: movaps (%r8), %xmm15
1297 ; SSE-NEXT: movaps 16(%r8), %xmm13
1298 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1299 ; SSE-NEXT: movdqa (%r9), %xmm11
1300 ; SSE-NEXT: movdqa 16(%r9), %xmm12
1301 ; SSE-NEXT: movdqa %xmm12, (%rsp) # 16-byte Spill
1302 ; SSE-NEXT: movdqa (%rax), %xmm4
1303 ; SSE-NEXT: movaps %xmm15, %xmm0
1304 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm14[1,1]
1305 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,1,1]
1306 ; SSE-NEXT: movss {{.*#+}} xmm5 = xmm1[0],xmm5[1,2,3]
1307 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
1308 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1309 ; SSE-NEXT: movdqa %xmm3, %xmm0
1310 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
1311 ; SSE-NEXT: movdqa %xmm7, %xmm3
1312 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1313 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
1314 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1315 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm11[1,1,1,1]
1316 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
1317 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
1318 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1319 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
1320 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[1,1,1,1]
1321 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1322 ; SSE-NEXT: movaps %xmm13, %xmm0
1323 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
1324 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
1325 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1326 ; SSE-NEXT: movdqa 16(%rax), %xmm10
1327 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
1328 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1329 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[1,1,1,1]
1330 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1331 ; SSE-NEXT: movdqa 16(%rdi), %xmm13
1332 ; SSE-NEXT: movdqa %xmm13, %xmm0
1333 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm9[2],xmm0[3],xmm9[3]
1334 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1335 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
1336 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1337 ; SSE-NEXT: movdqa 32(%rsi), %xmm8
1338 ; SSE-NEXT: movaps 32(%rdx), %xmm5
1339 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1340 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
1341 ; SSE-NEXT: movaps %xmm5, %xmm1
1342 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1343 ; SSE-NEXT: movaps 32(%rcx), %xmm5
1344 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1345 ; SSE-NEXT: movaps 32(%r8), %xmm0
1346 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1347 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm5[1,1]
1348 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
1349 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1350 ; SSE-NEXT: movdqa 32(%r9), %xmm5
1351 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1352 ; SSE-NEXT: movdqa 32(%rax), %xmm0
1353 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1354 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1355 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,1,1]
1356 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1357 ; SSE-NEXT: movdqa 32(%rdi), %xmm0
1358 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1359 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
1360 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1361 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
1362 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1363 ; SSE-NEXT: movdqa 48(%rsi), %xmm1
1364 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1365 ; SSE-NEXT: movdqa 48(%rdx), %xmm0
1366 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1367 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1368 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
1369 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
1370 ; SSE-NEXT: movaps 48(%rcx), %xmm2
1371 ; SSE-NEXT: movaps 48(%r8), %xmm1
1372 ; SSE-NEXT: movaps %xmm1, %xmm0
1373 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
1374 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1375 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
1376 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1377 ; SSE-NEXT: movaps 48(%rdi), %xmm0
1378 ; SSE-NEXT: movaps 48(%rax), %xmm7
1379 ; SSE-NEXT: movaps %xmm0, %xmm5
1380 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm7[0,3]
1381 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1382 ; SSE-NEXT: movaps 48(%r9), %xmm6
1383 ; SSE-NEXT: movaps %xmm6, %xmm5
1384 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm7[1,1]
1385 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1386 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1387 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1388 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm7[2,3]
1389 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1390 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,3],xmm6[3,3]
1391 ; SSE-NEXT: movaps %xmm1, %xmm5
1392 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
1393 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1394 ; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm1[1]
1395 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
1396 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[3,3,3,3]
1397 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
1398 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm7[2,0]
1399 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1400 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1401 ; SSE-NEXT: movaps %xmm5, %xmm0
1402 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1403 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
1404 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1405 ; SSE-NEXT: movdqa %xmm2, %xmm7
1406 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
1407 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm0[0]
1408 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1409 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm2[1,3]
1410 ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1411 ; SSE-NEXT: movaps %xmm15, %xmm2
1412 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1413 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1]
1414 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0,2]
1415 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1416 ; SSE-NEXT: unpckhps {{.*#+}} xmm15 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
1417 ; SSE-NEXT: movaps %xmm5, %xmm2
1418 ; SSE-NEXT: movaps %xmm5, %xmm7
1419 ; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm14[2],xmm2[3],xmm14[3]
1420 ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm15[0]
1421 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1422 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1423 ; SSE-NEXT: movaps %xmm2, %xmm0
1424 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1425 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
1426 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1427 ; SSE-NEXT: movdqa %xmm13, %xmm15
1428 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm9[0],xmm15[1],xmm9[1]
1429 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm0[2,0]
1430 ; SSE-NEXT: movdqa %xmm13, %xmm0
1431 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm10[0,3]
1432 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
1433 ; SSE-NEXT: movaps %xmm14, %xmm12
1434 ; SSE-NEXT: movaps (%rsp), %xmm3 # 16-byte Reload
1435 ; SSE-NEXT: unpcklps {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
1436 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,1],xmm0[2,0]
1437 ; SSE-NEXT: movaps %xmm4, %xmm10
1438 ; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm2[2],xmm10[3],xmm2[3]
1439 ; SSE-NEXT: movaps %xmm14, %xmm0
1440 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
1441 ; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm0[0]
1442 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1443 ; SSE-NEXT: movdqa %xmm5, %xmm0
1444 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1445 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
1446 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
1447 ; SSE-NEXT: movdqa %xmm13, %xmm11
1448 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm8[0],xmm11[1],xmm8[1]
1449 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm11 = xmm11[0],xmm0[0]
1450 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1451 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm13[1,3]
1452 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
1453 ; SSE-NEXT: movaps %xmm9, %xmm8
1454 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1455 ; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
1456 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[0,2]
1457 ; SSE-NEXT: movaps %xmm9, %xmm0
1458 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
1459 ; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm2[2],xmm5[3],xmm2[3]
1460 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
1461 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1462 ; SSE-NEXT: movaps %xmm1, %xmm4
1463 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1464 ; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
1465 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm1[0]
1466 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1467 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1468 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
1469 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,0]
1470 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1471 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1472 ; SSE-NEXT: shufps $36, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
1473 ; SSE-NEXT: # xmm3 = xmm3[0,1],mem[2,0]
1474 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1475 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1476 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1477 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1478 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm0[0,1]
1479 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,0]
1480 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3]
1481 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1482 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,0]
1483 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1484 ; SSE-NEXT: # xmm7 = xmm7[3,3],mem[3,3]
1485 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1486 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm7[2,0]
1487 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1488 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[2,3,2,3]
1489 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm6[0],xmm1[1,2,3]
1490 ; SSE-NEXT: movaps %xmm1, %xmm2
1491 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1492 ; SSE-NEXT: # xmm7 = xmm7[3,3],mem[3,3]
1493 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1494 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm7[2,0]
1495 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1496 ; SSE-NEXT: # xmm6 = mem[3,3,3,3]
1497 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm6[0],xmm1[1,2,3]
1498 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1499 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1500 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1501 ; SSE-NEXT: # xmm6 = xmm6[3,3],mem[3,3]
1502 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1503 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm6[2,0]
1504 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1505 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1506 ; SSE-NEXT: movss {{.*#+}} xmm7 = xmm6[0],xmm7[1,2,3]
1507 ; SSE-NEXT: shufps $255, (%rsp), %xmm1 # 16-byte Folded Reload
1508 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
1509 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm1[2,0]
1510 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1511 ; SSE-NEXT: # xmm6 = mem[3,3,3,3]
1512 ; SSE-NEXT: movss {{.*#+}} xmm14 = xmm6[0],xmm14[1,2,3]
1513 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1514 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1515 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
1516 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,3],xmm1[2,0]
1517 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1518 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
1519 ; SSE-NEXT: movss {{.*#+}} xmm13 = xmm6[0],xmm13[1,2,3]
1520 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1521 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
1522 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,3],xmm1[2,0]
1523 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1524 ; SSE-NEXT: # xmm6 = mem[3,3,3,3]
1525 ; SSE-NEXT: movss {{.*#+}} xmm9 = xmm6[0],xmm9[1,2,3]
1526 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1527 ; SSE-NEXT: movaps %xmm0, 416(%rax)
1528 ; SSE-NEXT: movaps %xmm4, 400(%rax)
1529 ; SSE-NEXT: movaps %xmm3, 384(%rax)
1530 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1531 ; SSE-NEXT: movaps %xmm0, 352(%rax)
1532 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1533 ; SSE-NEXT: movaps %xmm0, 336(%rax)
1534 ; SSE-NEXT: movdqa %xmm5, 288(%rax)
1535 ; SSE-NEXT: movaps %xmm8, 240(%rax)
1536 ; SSE-NEXT: movdqa %xmm11, 224(%rax)
1537 ; SSE-NEXT: movaps %xmm10, 176(%rax)
1538 ; SSE-NEXT: movaps %xmm12, 128(%rax)
1539 ; SSE-NEXT: movaps %xmm15, 112(%rax)
1540 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1541 ; SSE-NEXT: movaps %xmm0, 64(%rax)
1542 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1543 ; SSE-NEXT: movaps %xmm0, 16(%rax)
1544 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1545 ; SSE-NEXT: movaps %xmm0, (%rax)
1546 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1547 ; SSE-NEXT: movaps %xmm0, 432(%rax)
1548 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1549 ; SSE-NEXT: movaps %xmm0, 368(%rax)
1550 ; SSE-NEXT: movaps %xmm9, 320(%rax)
1551 ; SSE-NEXT: movaps %xmm13, 304(%rax)
1552 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1553 ; SSE-NEXT: movaps %xmm0, 272(%rax)
1554 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1555 ; SSE-NEXT: movaps %xmm0, 256(%rax)
1556 ; SSE-NEXT: movaps %xmm14, 208(%rax)
1557 ; SSE-NEXT: movaps %xmm7, 192(%rax)
1558 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1559 ; SSE-NEXT: movaps %xmm0, 160(%rax)
1560 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1561 ; SSE-NEXT: movaps %xmm0, 144(%rax)
1562 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1563 ; SSE-NEXT: movaps %xmm0, 96(%rax)
1564 ; SSE-NEXT: movaps %xmm2, 80(%rax)
1565 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1566 ; SSE-NEXT: movaps %xmm0, 48(%rax)
1567 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1568 ; SSE-NEXT: movaps %xmm0, 32(%rax)
1569 ; SSE-NEXT: addq $536, %rsp # imm = 0x218
1570 ; SSE-NEXT: retq
1571 ;
1572 ; AVX1-ONLY-LABEL: store_i32_stride7_vf16:
1573 ; AVX1-ONLY: # %bb.0:
1574 ; AVX1-ONLY-NEXT: subq $488, %rsp # imm = 0x1E8
1575 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
1576 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm5
1577 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %ymm6
1578 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %ymm4
1579 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %ymm7
1580 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %ymm0
1581 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %ymm1
1582 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %ymm2
1583 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm3 = ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[6],ymm7[6],ymm4[7],ymm7[7]
1584 ; AVX1-ONLY-NEXT: vmovaps %ymm4, %ymm13
1585 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1586 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm6[1],ymm5[1],ymm6[3],ymm5[3]
1587 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1588 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
1589 ; AVX1-ONLY-NEXT: vmovaps %ymm5, %ymm8
1590 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1591 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm4[2,3],ymm3[2,3]
1592 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm0[6,7]
1593 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm1[2,3,2,3]
1594 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[1,1,2,2,5,5,6,6]
1595 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4,5,6],ymm4[7]
1596 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
1597 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7]
1598 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1599 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %xmm9
1600 ; AVX1-ONLY-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1601 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %xmm10
1602 ; AVX1-ONLY-NEXT: vmovaps %xmm10, (%rsp) # 16-byte Spill
1603 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm4
1604 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm11
1605 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm3 = xmm11[0],xmm4[0]
1606 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,1]
1607 ; AVX1-ONLY-NEXT: vmovaps %xmm4, %xmm12
1608 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1609 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
1610 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %xmm5
1611 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %xmm14
1612 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm4 = xmm14[0],xmm5[0],xmm14[1],xmm5[1]
1613 ; AVX1-ONLY-NEXT: vmovaps %xmm14, %xmm15
1614 ; AVX1-ONLY-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1615 ; AVX1-ONLY-NEXT: vmovaps %xmm5, %xmm14
1616 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1617 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,0,1]
1618 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
1619 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm4 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
1620 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
1621 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %xmm5
1622 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1623 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
1624 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
1625 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6],ymm3[7]
1626 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1627 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm9[1,1],xmm10[1,1]
1628 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
1629 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5],ymm3[6,7]
1630 ; AVX1-ONLY-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1631 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm12[1],xmm11[1]
1632 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm11[1,1],xmm4[0,2]
1633 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
1634 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm15[1],xmm14[1],zero
1635 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1,2],ymm4[3,4,5,6,7]
1636 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5],ymm4[6,7]
1637 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1638 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm6[1,1],ymm8[1,1],ymm6[5,5],ymm8[5,5]
1639 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
1640 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm13[1,1],ymm7[1,1],ymm13[5,5],ymm7[5,5]
1641 ; AVX1-ONLY-NEXT: vmovaps %ymm7, %ymm13
1642 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1643 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6],ymm3[7]
1644 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
1645 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,1],ymm1[6,4],ymm0[6,5]
1646 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
1647 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm0
1648 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
1649 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6],ymm0[7]
1650 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1651 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm12
1652 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm11
1653 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm12[1],xmm11[1]
1654 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm11[1,1],xmm0[0,2]
1655 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1656 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm2
1657 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm9
1658 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm9[1],xmm2[1],zero
1659 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm3
1660 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1661 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7]
1662 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm15
1663 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm14
1664 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm15[1,1],xmm14[1,1]
1665 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
1666 ; AVX1-ONLY-NEXT: vmovaps (%rax), %xmm2
1667 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1668 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
1669 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
1670 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
1671 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1672 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
1673 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1674 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
1675 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm11[0],xmm12[0]
1676 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm12[2,1]
1677 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
1678 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm9[0],xmm3[0],xmm9[1],xmm3[1]
1679 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,0,1]
1680 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
1681 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6],ymm1[7]
1682 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1683 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm10
1684 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm8
1685 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm8[1,1],ymm10[1,1],ymm8[5,5],ymm10[5,5]
1686 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
1687 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm7
1688 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm6
1689 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm7[1,1],ymm6[1,1],ymm7[5,5],ymm6[5,5]
1690 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
1691 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm2
1692 ; AVX1-ONLY-NEXT: vmovaps (%r9), %ymm1
1693 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
1694 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm4[2,0],ymm2[2,1],ymm4[6,4],ymm2[6,5]
1695 ; AVX1-ONLY-NEXT: vmovaps (%rax), %ymm4
1696 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm4[2,3],ymm0[2,3]
1697 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm0
1698 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[3]
1699 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6],ymm0[7]
1700 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1701 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm8[1],ymm10[1],ymm8[3],ymm10[3]
1702 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
1703 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm3 = ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[6],ymm6[6],ymm7[7],ymm6[7]
1704 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3]
1705 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
1706 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm1[1,1],ymm3[0,2],ymm1[5,5],ymm3[4,6]
1707 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
1708 ; AVX1-ONLY-NEXT: vmovaps 16(%rax), %xmm5
1709 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4,5,6,7]
1710 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3,4,5],ymm3[6,7]
1711 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1712 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
1713 ; AVX1-ONLY-NEXT: # ymm0 = ymm13[3,3],mem[3,3],ymm13[7,7],mem[7,7]
1714 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
1715 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
1716 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm3 # 32-byte Folded Reload
1717 ; AVX1-ONLY-NEXT: # ymm3 = ymm13[3,3],mem[3,3],ymm13[7,7],mem[7,7]
1718 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm3, %xmm3
1719 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7]
1720 ; AVX1-ONLY-NEXT: vbroadcastss 60(%r8), %ymm3
1721 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
1722 ; AVX1-ONLY-NEXT: vbroadcastss 60(%r9), %ymm3
1723 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
1724 ; AVX1-ONLY-NEXT: vbroadcastsd 56(%rax), %ymm3
1725 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6],ymm3[7]
1726 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1727 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm6[3,3],ymm7[3,3],ymm6[7,7],ymm7[7,7]
1728 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
1729 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm10[3,3],ymm8[3,3],ymm10[7,7],ymm8[7,7]
1730 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm3, %xmm3
1731 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7]
1732 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[3,3],ymm1[3,3],ymm2[7,7],ymm1[7,7]
1733 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[2,3],ymm1[1,2],ymm4[6,7],ymm1[5,6]
1734 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
1735 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
1736 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
1737 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1738 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1739 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
1740 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
1741 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1742 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
1743 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
1744 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
1745 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1746 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
1747 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1748 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[2,2,2,2]
1749 ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
1750 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
1751 ; AVX1-ONLY-NEXT: vbroadcastsd 40(%rax), %ymm2
1752 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
1753 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4],ymm3[5,6,7]
1754 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1755 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm12[3,3],xmm11[3,3]
1756 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm2 # 16-byte Folded Reload
1757 ; AVX1-ONLY-NEXT: # xmm2 = xmm9[2],mem[2],xmm9[3],mem[3]
1758 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm2, %ymm2
1759 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1760 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
1761 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,2,2,2]
1762 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm14[0,1,2],xmm2[3]
1763 ; AVX1-ONLY-NEXT: vbroadcastsd 8(%rax), %ymm3
1764 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
1765 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4],ymm1[5,6,7]
1766 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm2 # 32-byte Folded Reload
1767 ; AVX1-ONLY-NEXT: # ymm2 = ymm13[0],mem[0],ymm13[1],mem[1],ymm13[4],mem[4],ymm13[5],mem[5]
1768 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1769 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1770 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
1771 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,1],ymm3[0,2],ymm4[7,5],ymm3[4,6]
1772 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
1773 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3],xmm5[3,3]
1774 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
1775 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
1776 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3],ymm2[4,5,6,7]
1777 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm3 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[4],ymm8[4],ymm10[5],ymm8[5]
1778 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm7[0],ymm6[0],ymm7[2],ymm6[2]
1779 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm6[3,1],ymm4[0,2],ymm6[7,5],ymm4[4,6]
1780 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
1781 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm14[3,3],xmm15[3,3]
1782 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
1783 ; AVX1-ONLY-NEXT: # xmm4 = xmm4[0,1,2],mem[3]
1784 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3],ymm3[4,5,6,7]
1785 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
1786 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%rax)
1787 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 320(%rax)
1788 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rax)
1789 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1790 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rax)
1791 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1792 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rax)
1793 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1794 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rax)
1795 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1796 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
1797 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1798 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
1799 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1800 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
1801 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1802 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rax)
1803 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1804 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rax)
1805 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1806 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
1807 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1808 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rax)
1809 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1810 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rax)
1811 ; AVX1-ONLY-NEXT: addq $488, %rsp # imm = 0x1E8
1812 ; AVX1-ONLY-NEXT: vzeroupper
1813 ; AVX1-ONLY-NEXT: retq
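1814 ;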
1815 ; AVX2-SLOW-LABEL: store_i32_stride7_vf16:
1816 ; AVX2-SLOW: # %bb.0:
1817 ; AVX2-SLOW-NEXT: subq $504, %rsp # imm = 0x1F8
1818 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1819 ; AVX2-SLOW-NEXT: vmovaps (%rax), %xmm10
1820 ; AVX2-SLOW-NEXT: vmovaps %xmm10, (%rsp) # 16-byte Spill
1821 ; AVX2-SLOW-NEXT: vmovaps 32(%rax), %xmm0
1822 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1823 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1824 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm13
1825 ; AVX2-SLOW-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1826 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %xmm2
1827 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1828 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm14
1829 ; AVX2-SLOW-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1830 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %xmm1
1831 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1832 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
1833 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
1834 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
1835 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
1836 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm1
1837 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1838 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %xmm11
1839 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %xmm9
1840 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm9[1],xmm11[1],zero
1841 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm2
1842 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1843 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %xmm8
1844 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %xmm12
1845 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %xmm7
1846 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm7[1,1,2,2]
1847 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3]
1848 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
1849 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3,4,5,6,7]
1850 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2],ymm4[3,4,5],ymm5[6,7]
1851 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1852 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm4
1853 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm14[1,1,1,1]
1854 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0],xmm13[1],xmm5[2,3]
1855 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm5, %ymm5
1856 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
1857 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm12[1,1,2,2]
1858 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm2[2],xmm5[3]
1859 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm5[0,1,2,1]
1860 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm5
1861 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm5[1],xmm1[1],zero
1862 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm10[1,2],ymm6[3,4,5,6,7]
1863 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm4[3,4,5],ymm6[6,7]
1864 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1865 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
1866 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1867 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm1
1868 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1869 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm4 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
1870 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,2]
1871 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm0
1872 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1873 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %ymm13
1874 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm6 = ymm0[2],ymm13[2],ymm0[3],ymm13[3],ymm0[6],ymm13[6],ymm0[7],ymm13[7]
1875 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
1876 ; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm0
1877 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1878 ; AVX2-SLOW-NEXT: vmovaps (%r9), %ymm1
1879 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1880 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm1[1,1,2,2,5,5,6,6]
1881 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm0[2],ymm6[3,4,5],ymm0[6],ymm6[7]
1882 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3]
1883 ; AVX2-SLOW-NEXT: vmovaps 16(%rax), %xmm14
1884 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm14[1],ymm6[2,3,4,5,6,7]
1885 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm4[2,3,4,5],ymm6[6,7]
1886 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1887 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm14
1888 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm15
1889 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm4 = ymm14[2],ymm15[2],ymm14[3],ymm15[3],ymm14[6],ymm15[6],ymm14[7],ymm15[7]
1890 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm4[2,2,2,2]
1891 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm4
1892 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %ymm6
1893 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[6],ymm6[6],ymm4[7],ymm6[7]
1894 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1895 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %ymm2
1896 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1897 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
1898 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %ymm2
1899 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1900 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm2[1,2,2,3,5,6,6,7]
1901 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,2]
1902 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0],ymm1[1,2,3,4,5,6],ymm10[7]
1903 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
1904 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
1905 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1906 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[6],ymm4[6],ymm6[7],ymm4[7]
1907 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
1908 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm15[2],ymm14[2],ymm15[3],ymm14[3],ymm15[6],ymm14[6],ymm15[7],ymm14[7]
1909 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
1910 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
1911 ; AVX2-SLOW-NEXT: vbroadcastss 60(%r8), %ymm1
1912 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
1913 ; AVX2-SLOW-NEXT: vbroadcastss 60(%r9), %ymm1
1914 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1915 ; AVX2-SLOW-NEXT: vbroadcastsd 56(%rax), %ymm1
1916 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
1917 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1918 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm8[3,3],xmm7[3,3]
1919 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1920 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm1 = xmm9[2],xmm11[2],xmm9[3],xmm11[3]
1921 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
1922 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
1923 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
1924 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1925 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,2,2,2]
1926 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1927 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
1928 ; AVX2-SLOW-NEXT: vbroadcastsd 40(%rax), %ymm10
1929 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
1930 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4],ymm3[5,6,7]
1931 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1932 ; AVX2-SLOW-NEXT: vbroadcastss %xmm11, %xmm3
1933 ; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm1
1934 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
1935 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
1936 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
1937 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
1938 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
1939 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
1940 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1941 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1942 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm7, %ymm2
1943 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
1944 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm1[4,5,6],ymm3[7]
1945 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1946 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
1947 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm10[3,3],xmm12[3,3]
1948 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1949 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
1950 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
1951 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
1952 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
1953 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
1954 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1955 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm3[2,2,2,2]
1956 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
1957 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3]
1958 ; AVX2-SLOW-NEXT: vbroadcastsd 8(%rax), %ymm2
1959 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
1960 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
1961 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1962 ; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm0
1963 ; AVX2-SLOW-NEXT: vbroadcastss %xmm5, %xmm1
1964 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1965 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm1 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
1966 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
1967 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
1968 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
1969 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
1970 ; AVX2-SLOW-NEXT: vmovaps %xmm8, %xmm10
1971 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1972 ; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm11 # 16-byte Reload
1973 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm11, %ymm2
1974 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
1975 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
1976 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1977 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm4[1,1],ymm6[1,1],ymm4[5,5],ymm6[5,5]
1978 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm15[1,1,1,1,5,5,5,5]
1979 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm14[1],ymm2[2,3,4],ymm14[5],ymm2[6,7]
1980 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
1981 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
1982 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
1983 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,0,0,0,4,4,4,4]
1984 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
1985 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,1,0,1,4,5,4,5]
1986 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
1987 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
1988 ; AVX2-SLOW-NEXT: vbroadcastsd 48(%rax), %ymm3
1989 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
1990 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
1991 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
1992 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm12[1,1,1,1,5,5,5,5]
1993 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1994 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3,4],ymm5[5],ymm2[6,7]
1995 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[2,2,2,2]
1996 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1997 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm2[1,1],ymm13[1,1],ymm2[5,5],ymm13[5,5]
1998 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3,4],ymm3[5,6],ymm0[7]
1999 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2000 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm1[0,0,0,0,4,4,4,4]
2001 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2002 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm0[0,1,0,1,4,5,4,5]
2003 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
2004 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3]
2005 ; AVX2-SLOW-NEXT: vbroadcastsd 16(%rax), %ymm4
2006 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
2007 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2],ymm8[3,4,5,6],ymm3[7]
2008 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm3 = ymm13[2],ymm2[2],ymm13[3],ymm2[3],ymm13[6],ymm2[6],ymm13[7],ymm2[7]
2009 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[3,3,3,3]
2010 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm4 = ymm12[2],ymm5[2],ymm12[3],ymm5[3],ymm12[6],ymm5[6],ymm12[7],ymm5[7]
2011 ; AVX2-SLOW-NEXT: vmovaps %ymm12, %ymm2
2012 ; AVX2-SLOW-NEXT: vmovaps %ymm5, %ymm12
2013 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[3,3,3,3]
2014 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7]
2015 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm0[3,3],ymm1[3,3],ymm0[7,7],ymm1[7,7]
2016 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm5 = mem[2,3,2,3,6,7,6,7]
2017 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3,4],ymm4[5,6],ymm5[7]
2018 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,2,3]
2019 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4],ymm4[5,6,7]
2020 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm4 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[4],ymm15[4],ymm14[5],ymm15[5]
2021 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm6[3,1,2,0,7,5,6,4]
2022 ; AVX2-SLOW-NEXT: vbroadcastss 48(%rdx), %ymm6
2023 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6],ymm5[7]
2024 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5],ymm5[6,7]
2025 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2026 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
2027 ; AVX2-SLOW-NEXT: # xmm5 = xmm0[3,3],mem[3,3]
2028 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm7[3]
2029 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1,2,3],ymm4[4,5,6,7]
2030 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm5 = ymm12[0],ymm2[0],ymm12[1],ymm2[1],ymm12[4],ymm2[4],ymm12[5],ymm2[5]
2031 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm13[3,1,2,0,7,5,6,4]
2032 ; AVX2-SLOW-NEXT: vbroadcastss 16(%rdx), %ymm7
2033 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6],ymm6[7]
2034 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
2035 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm6 # 16-byte Folded Reload
2036 ; AVX2-SLOW-NEXT: # xmm6 = xmm10[3,3],mem[3,3]
2037 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm11[3]
2038 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1,2,3],ymm5[4,5,6,7]
2039 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
2040 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 96(%rax)
2041 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 320(%rax)
2042 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 192(%rax)
2043 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 128(%rax)
2044 ; AVX2-SLOW-NEXT: vmovaps %ymm9, 352(%rax)
2045 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2046 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 160(%rax)
2047 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2048 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
2049 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2050 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
2051 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2052 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rax)
2053 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2054 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rax)
2055 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2056 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax)
2057 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2058 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 288(%rax)
2059 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2060 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 416(%rax)
2061 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2062 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 384(%rax)
2063 ; AVX2-SLOW-NEXT: addq $504, %rsp # imm = 0x1F8
2064 ; AVX2-SLOW-NEXT: vzeroupper
2065 ; AVX2-SLOW-NEXT: retq
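2066 ;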
2067 ; AVX2-FAST-LABEL: store_i32_stride7_vf16:
2068 ; AVX2-FAST: # %bb.0:
2069 ; AVX2-FAST-NEXT: subq $536, %rsp # imm = 0x218
2070 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
2071 ; AVX2-FAST-NEXT: vmovaps (%rax), %xmm2
2072 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2073 ; AVX2-FAST-NEXT: vmovaps 32(%rax), %xmm0
2074 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2075 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2076 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm7
2077 ; AVX2-FAST-NEXT: vmovaps %xmm7, (%rsp) # 16-byte Spill
2078 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %xmm3
2079 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2080 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm8
2081 ; AVX2-FAST-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2082 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %xmm4
2083 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1,1,1]
2084 ; AVX2-FAST-NEXT: vmovaps %xmm4, %xmm14
2085 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
2086 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
2087 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
2088 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm11
2089 ; AVX2-FAST-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2090 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %xmm9
2091 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %xmm6
2092 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm6[1],xmm9[1],zero
2093 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm12
2094 ; AVX2-FAST-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2095 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %xmm5
2096 ; AVX2-FAST-NEXT: vmovaps (%rsi), %xmm10
2097 ; AVX2-FAST-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2098 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %xmm3
2099 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm4 = xmm3[1,1,2,2]
2100 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3]
2101 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
2102 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0],ymm1[1,2],ymm4[3,4,5,6,7]
2103 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
2104 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2105 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
2106 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm8[1,1,1,1]
2107 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3]
2108 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
2109 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
2110 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm10[1,1,2,2]
2111 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm12[2],xmm1[3]
2112 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
2113 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm2
2114 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2115 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm4 = zero,xmm2[1],xmm11[1],zero
2116 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1,2],ymm1[3,4,5,6,7]
2117 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
2118 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2119 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
2120 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2121 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm1
2122 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2123 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
2124 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
2125 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm1
2126 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2127 ; AVX2-FAST-NEXT: vmovaps (%rcx), %ymm2
2128 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2129 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
2130 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2131 ; AVX2-FAST-NEXT: vmovaps (%r8), %ymm2
2132 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2133 ; AVX2-FAST-NEXT: vmovaps (%r9), %ymm1
2134 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2135 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
2136 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
2137 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
2138 ; AVX2-FAST-NEXT: vmovaps 16(%rax), %xmm4
2139 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4,5,6,7]
2140 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
2141 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2142 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm13
2143 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %ymm12
2144 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[6],ymm12[6],ymm13[7],ymm12[7]
2145 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[2,2,2,2]
2146 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm2
2147 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %ymm11
2148 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm11[2],ymm2[3],ymm11[3],ymm2[6],ymm11[6],ymm2[7],ymm11[7]
2149 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm0[4,5,6,7]
2150 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %ymm10
2151 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm10[6,7]
2152 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %ymm4
2153 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm15 = [5,6,5,6,5,6,5,6]
2154 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm15, %ymm15
2155 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0],ymm1[1,2,3,4,5,6],ymm15[7]
2156 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
2157 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
2158 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2159 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm11[2],ymm2[2],ymm11[3],ymm2[3],ymm11[6],ymm2[6],ymm11[7],ymm2[7]
2160 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
2161 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[6],ymm13[6],ymm12[7],ymm13[7]
2162 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
2163 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2164 ; AVX2-FAST-NEXT: vbroadcastss 60(%r8), %ymm1
2165 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2166 ; AVX2-FAST-NEXT: vbroadcastss 60(%r9), %ymm1
2167 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2168 ; AVX2-FAST-NEXT: vbroadcastsd 56(%rax), %ymm1
2169 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
2170 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2171 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm5[3,3],xmm3[3,3]
2172 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
2173 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm15 = xmm6[2],xmm9[2],xmm6[3],xmm9[3]
2174 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm7 = [0,1,2,2,0,1,2,2]
2175 ; AVX2-FAST-NEXT: # ymm7 = mem[0,1,0,1]
2176 ; AVX2-FAST-NEXT: vpermps %ymm15, %ymm7, %ymm15
2177 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3,4],ymm1[5,6],ymm15[7]
2178 ; AVX2-FAST-NEXT: vmovaps %xmm14, %xmm0
2179 ; AVX2-FAST-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2180 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm15 = xmm14[2,2,2,2]
2181 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2182 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm15 = xmm8[0,1,2],xmm15[3]
2183 ; AVX2-FAST-NEXT: vbroadcastsd 40(%rax), %ymm14
2184 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
2185 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm14[2,3,4],ymm1[5,6,7]
2186 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2187 ; AVX2-FAST-NEXT: vbroadcastss %xmm9, %xmm1
2188 ; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm6
2189 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
2190 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2191 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm7, %ymm3
2192 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3],ymm3[4,5,6,7]
2193 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm3 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
2194 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
2195 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2196 ; AVX2-FAST-NEXT: vbroadcastsd %xmm15, %ymm5
2197 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
2198 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm3[4,5,6],ymm1[7]
2199 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2200 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm11[1,1],ymm2[5,5],ymm11[5,5]
2201 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm12[1,1,1,1,5,5,5,5]
2202 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm13[1],ymm2[2,3,4],ymm13[5],ymm2[6,7]
2203 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
2204 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
2205 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm4[0,0,0,0,4,4,4,4]
2206 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm10[0,1,0,1,4,5,4,5]
2207 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
2208 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
2209 ; AVX2-FAST-NEXT: vbroadcastsd 48(%rax), %ymm3
2210 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
2211 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
2212 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2213 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2214 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2215 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,3],xmm6[3,3]
2216 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2217 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2218 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2219 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm3 = xmm9[2],xmm0[2],xmm9[3],xmm0[3]
2220 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm7, %ymm3
2221 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6],ymm3[7]
2222 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2223 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm10[2,2,2,2]
2224 ; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm14 # 16-byte Reload
2225 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm3 = xmm14[0,1,2],xmm3[3]
2226 ; AVX2-FAST-NEXT: vbroadcastsd 8(%rax), %ymm4
2227 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
2228 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4],ymm1[5,6,7]
2229 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2230 ; AVX2-FAST-NEXT: vbroadcastss %xmm0, %xmm3
2231 ; AVX2-FAST-NEXT: vbroadcastss %xmm9, %xmm4
2232 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2233 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm4 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
2234 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm7, %ymm0
2235 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
2236 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm3 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
2237 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
2238 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2239 ; AVX2-FAST-NEXT: vbroadcastsd %xmm10, %ymm4
2240 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
2241 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6],ymm0[7]
2242 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2243 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2244 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm0[1,1,1,1,5,5,5,5]
2245 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2246 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm6[1],ymm3[2,3,4],ymm6[5],ymm3[6,7]
2247 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm3[2,2,2,2]
2248 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2249 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2250 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm3[1,1],ymm1[1,1],ymm3[5,5],ymm1[5,5]
2251 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm2[0,1,2,3,4],ymm4[5,6],ymm2[7]
2252 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
2253 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm7[0,0,0,0,4,4,4,4]
2254 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2255 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm2[0,1,0,1,4,5,4,5]
2256 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
2257 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,2,3]
2258 ; AVX2-FAST-NEXT: vbroadcastsd 16(%rax), %ymm5
2259 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
2260 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm4[0,1,2],ymm9[3,4,5,6],ymm4[7]
2261 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm4 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
2262 ; AVX2-FAST-NEXT: vmovaps %ymm1, %ymm3
2263 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[3,3,3,3]
2264 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm5 = ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[6],ymm6[6],ymm0[7],ymm6[7]
2265 ; AVX2-FAST-NEXT: vmovaps %ymm0, %ymm1
2266 ; AVX2-FAST-NEXT: vmovaps %ymm6, %ymm0
2267 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[3,3,3,3]
2268 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3,4,5,6,7]
2269 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm2[3,3],ymm7[3,3],ymm2[7,7],ymm7[7,7]
2270 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm6 = mem[2,3,2,3,6,7,6,7]
2271 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3,4],ymm5[5,6],ymm6[7]
2272 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,2,3]
2273 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2,3,4],ymm5[5,6,7]
2274 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm5 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[4],ymm12[4],ymm13[5],ymm12[5]
2275 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm11[3,1,2,0,7,5,6,4]
2276 ; AVX2-FAST-NEXT: vbroadcastss 48(%rdx), %ymm7
2277 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6],ymm6[7]
2278 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
2279 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm6 # 16-byte Folded Reload
2280 ; AVX2-FAST-NEXT: # xmm6 = xmm8[3,3],mem[3,3]
2281 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm15[3]
2282 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1,2,3],ymm5[4,5,6,7]
2283 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm6 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
2284 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm7 = ymm3[3,1,2,0,7,5,6,4]
2285 ; AVX2-FAST-NEXT: vbroadcastss 16(%rdx), %ymm8
2286 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6],ymm7[7]
2287 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
2288 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm7 # 16-byte Folded Reload
2289 ; AVX2-FAST-NEXT: # xmm7 = xmm14[3,3],mem[3,3]
2290 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1,2],xmm10[3]
2291 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3],ymm6[4,5,6,7]
2292 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
2293 ; AVX2-FAST-NEXT: vmovaps %ymm6, 96(%rax)
2294 ; AVX2-FAST-NEXT: vmovaps %ymm5, 320(%rax)
2295 ; AVX2-FAST-NEXT: vmovaps %ymm4, 192(%rax)
2296 ; AVX2-FAST-NEXT: vmovaps %ymm9, 128(%rax)
2297 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2298 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
2299 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2300 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
2301 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2302 ; AVX2-FAST-NEXT: vmovaps %ymm0, 352(%rax)
2303 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2304 ; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rax)
2305 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2306 ; AVX2-FAST-NEXT: vmovaps %ymm0, 288(%rax)
2307 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2308 ; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rax)
2309 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2310 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
2311 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2312 ; AVX2-FAST-NEXT: vmovaps %ymm0, 256(%rax)
2313 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2314 ; AVX2-FAST-NEXT: vmovaps %ymm0, 416(%rax)
2315 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2316 ; AVX2-FAST-NEXT: vmovaps %ymm0, 384(%rax)
2317 ; AVX2-FAST-NEXT: addq $536, %rsp # imm = 0x218
2318 ; AVX2-FAST-NEXT: vzeroupper
2319 ; AVX2-FAST-NEXT: retq
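2320 ;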
2321 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf16:
2322 ; AVX2-FAST-PERLANE: # %bb.0:
2323 ; AVX2-FAST-PERLANE-NEXT: subq $504, %rsp # imm = 0x1F8
2324 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2325 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rax), %xmm10
2326 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm10, (%rsp) # 16-byte Spill
2327 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rax), %xmm0
2328 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2329 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2330 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm13
2331 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2332 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %xmm2
2333 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2334 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm14
2335 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2336 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %xmm1
2337 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2338 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
2339 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
2340 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
2341 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
2342 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm1
2343 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2344 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %xmm11
2345 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %xmm9
2346 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm9[1],xmm11[1],zero
2347 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm2
2348 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2349 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %xmm8
2350 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %xmm12
2351 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %xmm7
2352 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm7[1,1,2,2]
2353 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3]
2354 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
2355 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3,4,5,6,7]
2356 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2],ymm4[3,4,5],ymm5[6,7]
2357 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2358 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm4
2359 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm14[1,1,1,1]
2360 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0],xmm13[1],xmm5[2,3]
2361 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm5, %ymm5
2362 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
2363 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm12[1,1,2,2]
2364 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm2[2],xmm5[3]
2365 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm5[0,1,2,1]
2366 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm5
2367 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm5[1],xmm1[1],zero
2368 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm10[1,2],ymm6[3,4,5,6,7]
2369 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm4[3,4,5],ymm6[6,7]
2370 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2371 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
2372 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2373 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm1
2374 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2375 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm4 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
2376 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,2]
2377 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm0
2378 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2379 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %ymm13
2380 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm6 = ymm0[2],ymm13[2],ymm0[3],ymm13[3],ymm0[6],ymm13[6],ymm0[7],ymm13[7]
2381 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
2382 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm0
2383 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2384 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %ymm1
2385 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2386 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm1[1,1,2,2,5,5,6,6]
2387 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm0[2],ymm6[3,4,5],ymm0[6],ymm6[7]
2388 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3]
2389 ; AVX2-FAST-PERLANE-NEXT: vmovaps 16(%rax), %xmm14
2390 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm14[1],ymm6[2,3,4,5,6,7]
2391 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm4[2,3,4,5],ymm6[6,7]
2392 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2393 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm14
2394 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm15
2395 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm4 = ymm14[2],ymm15[2],ymm14[3],ymm15[3],ymm14[6],ymm15[6],ymm14[7],ymm15[7]
2396 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm4[2,2,2,2]
2397 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm4
2398 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %ymm6
2399 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[6],ymm6[6],ymm4[7],ymm6[7]
2400 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
2401 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %ymm2
2402 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2403 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
2404 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %ymm2
2405 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2406 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm2[1,2,2,3,5,6,6,7]
2407 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,2]
2408 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0],ymm1[1,2,3,4,5,6],ymm10[7]
2409 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
2410 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
2411 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2412 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[6],ymm4[6],ymm6[7],ymm4[7]
2413 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
2414 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm15[2],ymm14[2],ymm15[3],ymm14[3],ymm15[6],ymm14[6],ymm15[7],ymm14[7]
2415 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
2416 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2417 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 60(%r8), %ymm1
2418 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2419 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 60(%r9), %ymm1
2420 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2421 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 56(%rax), %ymm1
2422 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
2423 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2424 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm8[3,3],xmm7[3,3]
2425 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2426 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm1 = xmm9[2],xmm11[2],xmm9[3],xmm11[3]
2427 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
2428 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
2429 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
2430 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2431 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,2,2,2]
2432 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2433 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
2434 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 40(%rax), %ymm10
2435 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
2436 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4],ymm3[5,6,7]
2437 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2438 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm11, %xmm3
2439 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm1
2440 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
2441 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
2442 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
2443 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
2444 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
2445 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2446 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2447 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2448 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm7, %ymm2
2449 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
2450 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm1[4,5,6],ymm3[7]
2451 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2452 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2453 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm10[3,3],xmm12[3,3]
2454 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2455 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2456 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
2457 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
2458 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
2459 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
2460 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2461 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm3[2,2,2,2]
2462 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2463 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3]
2464 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 8(%rax), %ymm2
2465 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
2466 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
2467 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2468 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm0
2469 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm5, %xmm1
2470 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2471 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm1 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
2472 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,2]
2473 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
2474 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
2475 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
2476 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm8, %xmm10
2477 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2478 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsp), %xmm11 # 16-byte Reload
2479 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm11, %ymm2
2480 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
2481 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
2482 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2483 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm4[1,1],ymm6[1,1],ymm4[5,5],ymm6[5,5]
2484 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm15[1,1,1,1,5,5,5,5]
2485 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm14[1],ymm2[2,3,4],ymm14[5],ymm2[6,7]
2486 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
2487 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
2488 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
2489 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,0,0,0,4,4,4,4]
2490 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
2491 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,1,0,1,4,5,4,5]
2492 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
2493 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
2494 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 48(%rax), %ymm3
2495 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
2496 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
2497 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
2498 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm12[1,1,1,1,5,5,5,5]
2499 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2500 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3,4],ymm5[5],ymm2[6,7]
2501 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[2,2,2,2]
2502 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2503 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm2[1,1],ymm13[1,1],ymm2[5,5],ymm13[5,5]
2504 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3,4],ymm3[5,6],ymm0[7]
2505 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2506 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm1[0,0,0,0,4,4,4,4]
2507 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2508 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm0[0,1,0,1,4,5,4,5]
2509 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
2510 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3]
2511 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 16(%rax), %ymm4
2512 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
2513 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2],ymm8[3,4,5,6],ymm3[7]
2514 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm3 = ymm13[2],ymm2[2],ymm13[3],ymm2[3],ymm13[6],ymm2[6],ymm13[7],ymm2[7]
2515 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[3,3,3,3]
2516 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm4 = ymm12[2],ymm5[2],ymm12[3],ymm5[3],ymm12[6],ymm5[6],ymm12[7],ymm5[7]
2517 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm12, %ymm2
2518 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, %ymm12
2519 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[3,3,3,3]
2520 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7]
2521 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm0[3,3],ymm1[3,3],ymm0[7,7],ymm1[7,7]
2522 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm5 = mem[2,3,2,3,6,7,6,7]
2523 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3,4],ymm4[5,6],ymm5[7]
2524 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,2,3]
2525 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4],ymm4[5,6,7]
2526 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm4 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[4],ymm15[4],ymm14[5],ymm15[5]
2527 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm6[3,1,2,0,7,5,6,4]
2528 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 48(%rdx), %ymm6
2529 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6],ymm5[7]
2530 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5],ymm5[6,7]
2531 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2532 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
2533 ; AVX2-FAST-PERLANE-NEXT: # xmm5 = xmm0[3,3],mem[3,3]
2534 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm7[3]
2535 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1,2,3],ymm4[4,5,6,7]
2536 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm5 = ymm12[0],ymm2[0],ymm12[1],ymm2[1],ymm12[4],ymm2[4],ymm12[5],ymm2[5]
2537 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm13[3,1,2,0,7,5,6,4]
2538 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%rdx), %ymm7
2539 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6],ymm6[7]
2540 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
2541 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm6 # 16-byte Folded Reload
2542 ; AVX2-FAST-PERLANE-NEXT: # xmm6 = xmm10[3,3],mem[3,3]
2543 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm11[3]
2544 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1,2,3],ymm5[4,5,6,7]
2545 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2546 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 96(%rax)
2547 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 320(%rax)
2548 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 192(%rax)
2549 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 128(%rax)
2550 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 352(%rax)
2551 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2552 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 160(%rax)
2553 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2554 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
2555 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2556 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
2557 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2558 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rax)
2559 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2560 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rax)
2561 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2562 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax)
2563 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2564 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 288(%rax)
2565 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2566 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 416(%rax)
2567 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2568 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 384(%rax)
2569 ; AVX2-FAST-PERLANE-NEXT: addq $504, %rsp # imm = 0x1F8
2570 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
2571 ; AVX2-FAST-PERLANE-NEXT: retq
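2572 ;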
2573 ; AVX512F-LABEL: store_i32_stride7_vf16:
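2574 ; AVX512F: # %bb.0: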
2575 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
2576 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %r10
2577 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm4
2578 ; AVX512F-NEXT: vmovdqa64 (%rsi), %zmm6
2579 ; AVX512F-NEXT: vmovdqa64 (%rdx), %zmm3
2580 ; AVX512F-NEXT: vmovdqa64 (%rcx), %zmm5
2581 ; AVX512F-NEXT: vmovdqa64 (%r8), %zmm1
2582 ; AVX512F-NEXT: vmovdqa64 (%r9), %zmm2
2583 ; AVX512F-NEXT: vmovdqa64 (%r10), %zmm0
2584 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
2585 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
2586 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
2587 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm8
2588 ; AVX512F-NEXT: movw $6192, %cx # imm = 0x1830
2589 ; AVX512F-NEXT: kmovw %ecx, %k1
2590 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm8 {%k1}
2591 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
2592 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm2, %zmm7
2593 ; AVX512F-NEXT: movw $24769, %cx # imm = 0x60C1
2594 ; AVX512F-NEXT: kmovw %ecx, %k2
2595 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm8 {%k2}
2596 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
2597 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm8, %zmm7
2598 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
2599 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm9
2600 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
2601 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm8
2602 ; AVX512F-NEXT: movw $1548, %cx # imm = 0x60C
2603 ; AVX512F-NEXT: kmovw %ecx, %k2
2604 ; AVX512F-NEXT: vmovdqa32 %zmm9, %zmm8 {%k2}
2605 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
2606 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm9
2607 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
2608 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm9, %zmm10
2609 ; AVX512F-NEXT: movw $14448, %cx # imm = 0x3870
2610 ; AVX512F-NEXT: kmovw %ecx, %k2
2611 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm8 {%k2}
2612 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
2613 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm10
2614 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
2615 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm9
2616 ; AVX512F-NEXT: movw $12384, %cx # imm = 0x3060
2617 ; AVX512F-NEXT: kmovw %ecx, %k2
2618 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
2619 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
2620 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm10
2621 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
2622 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm10, %zmm11
2623 ; AVX512F-NEXT: movw $3612, %cx # imm = 0xE1C
2624 ; AVX512F-NEXT: kmovw %ecx, %k3
2625 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm9 {%k3}
2626 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
2627 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm10
2628 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
2629 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm11
2630 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm11 {%k2}
2631 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm12 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
2632 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm12
2633 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
2634 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm12, %zmm10
2635 ; AVX512F-NEXT: movw $15480, %cx # imm = 0x3C78
2636 ; AVX512F-NEXT: kmovw %ecx, %k2
2637 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm10 {%k2}
2638 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
2639 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
2640 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm12 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
2641 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm12
2642 ; AVX512F-NEXT: movw $3096, %cx # imm = 0xC18
2643 ; AVX512F-NEXT: kmovw %ecx, %k2
2644 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm12 {%k2}
2645 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
2646 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
2647 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
2648 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm11, %zmm13
2649 ; AVX512F-NEXT: movw $28897, %cx # imm = 0x70E1
2650 ; AVX512F-NEXT: kmovw %ecx, %k2
2651 ; AVX512F-NEXT: vmovdqa32 %zmm13, %zmm12 {%k2}
2652 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
2653 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
2654 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
2655 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm6, %zmm13
2656 ; AVX512F-NEXT: movw $-31994, %cx # imm = 0x8306
2657 ; AVX512F-NEXT: kmovw %ecx, %k2
2658 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm13 {%k2}
2659 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
2660 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
2661 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm14 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
2662 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm11, %zmm14
2663 ; AVX512F-NEXT: movw $7224, %cx # imm = 0x1C38
2664 ; AVX512F-NEXT: kmovw %ecx, %k2
2665 ; AVX512F-NEXT: vmovdqa32 %zmm14, %zmm13 {%k2}
2666 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
2667 ; AVX512F-NEXT: vpermi2d %zmm6, %zmm4, %zmm11
2668 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
2669 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm5, %zmm4
2670 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm4 {%k1}
2671 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
2672 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
2673 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
2674 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm3, %zmm1
2675 ; AVX512F-NEXT: movw $-30962, %cx # imm = 0x870E
2676 ; AVX512F-NEXT: kmovw %ecx, %k1
2677 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm4 {%k1}
2678 ; AVX512F-NEXT: vmovdqa64 %zmm4, 320(%rax)
2679 ; AVX512F-NEXT: vmovdqa64 %zmm13, 256(%rax)
2680 ; AVX512F-NEXT: vmovdqa64 %zmm12, 192(%rax)
2681 ; AVX512F-NEXT: vmovdqa64 %zmm10, 128(%rax)
2682 ; AVX512F-NEXT: vmovdqa64 %zmm9, 64(%rax)
2683 ; AVX512F-NEXT: vmovdqa64 %zmm8, (%rax)
2684 ; AVX512F-NEXT: vmovdqa64 %zmm7, 384(%rax)
2685 ; AVX512F-NEXT: vzeroupper
2686 ; AVX512F-NEXT: retq
2688 ; AVX512BW-LABEL: store_i32_stride7_vf16:
2689 ; AVX512BW: # %bb.0:
2690 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
2691 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10
2692 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm4
2693 ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm6
2694 ; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm3
2695 ; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm5
2696 ; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm1
2697 ; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm2
2698 ; AVX512BW-NEXT: vmovdqa64 (%r10), %zmm0
2699 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
2700 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
2701 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
2702 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm8
2703 ; AVX512BW-NEXT: movw $6192, %cx # imm = 0x1830
2704 ; AVX512BW-NEXT: kmovd %ecx, %k1
2705 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm8 {%k1}
2706 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
2707 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm2, %zmm7
2708 ; AVX512BW-NEXT: movw $24769, %cx # imm = 0x60C1
2709 ; AVX512BW-NEXT: kmovd %ecx, %k2
2710 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm8 {%k2}
2711 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
2712 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm8, %zmm7
2713 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
2714 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm9
2715 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
2716 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm8
2717 ; AVX512BW-NEXT: movw $1548, %cx # imm = 0x60C
2718 ; AVX512BW-NEXT: kmovd %ecx, %k2
2719 ; AVX512BW-NEXT: vmovdqa32 %zmm9, %zmm8 {%k2}
2720 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
2721 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm9
2722 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
2723 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm9, %zmm10
2724 ; AVX512BW-NEXT: movw $14448, %cx # imm = 0x3870
2725 ; AVX512BW-NEXT: kmovd %ecx, %k2
2726 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm8 {%k2}
2727 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
2728 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm10
2729 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
2730 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm9
2731 ; AVX512BW-NEXT: movw $12384, %cx # imm = 0x3060
2732 ; AVX512BW-NEXT: kmovd %ecx, %k2
2733 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm9 {%k2}
2734 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
2735 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm10
2736 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
2737 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm10, %zmm11
2738 ; AVX512BW-NEXT: movw $3612, %cx # imm = 0xE1C
2739 ; AVX512BW-NEXT: kmovd %ecx, %k3
2740 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm9 {%k3}
2741 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
2742 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm10
2743 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
2744 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm11
2745 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm11 {%k2}
2746 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm12 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
2747 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm12
2748 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
2749 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm12, %zmm10
2750 ; AVX512BW-NEXT: movw $15480, %cx # imm = 0x3C78
2751 ; AVX512BW-NEXT: kmovd %ecx, %k2
2752 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm10 {%k2}
2753 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
2754 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
2755 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm12 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
2756 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm12
2757 ; AVX512BW-NEXT: movw $3096, %cx # imm = 0xC18
2758 ; AVX512BW-NEXT: kmovd %ecx, %k2
2759 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm12 {%k2}
2760 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
2761 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
2762 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
2763 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm11, %zmm13
2764 ; AVX512BW-NEXT: movw $28897, %cx # imm = 0x70E1
2765 ; AVX512BW-NEXT: kmovd %ecx, %k2
2766 ; AVX512BW-NEXT: vmovdqa32 %zmm13, %zmm12 {%k2}
2767 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
2768 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
2769 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
2770 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm6, %zmm13
2771 ; AVX512BW-NEXT: movw $-31994, %cx # imm = 0x8306
2772 ; AVX512BW-NEXT: kmovd %ecx, %k2
2773 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm13 {%k2}
2774 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
2775 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm11
2776 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm14 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
2777 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm11, %zmm14
2778 ; AVX512BW-NEXT: movw $7224, %cx # imm = 0x1C38
2779 ; AVX512BW-NEXT: kmovd %ecx, %k2
2780 ; AVX512BW-NEXT: vmovdqa32 %zmm14, %zmm13 {%k2}
2781 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
2782 ; AVX512BW-NEXT: vpermi2d %zmm6, %zmm4, %zmm11
2783 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
2784 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm5, %zmm4
2785 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm4 {%k1}
2786 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
2787 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
2788 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
2789 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm1
2790 ; AVX512BW-NEXT: movw $-30962, %cx # imm = 0x870E
2791 ; AVX512BW-NEXT: kmovd %ecx, %k1
2792 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm4 {%k1}
2793 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 320(%rax)
2794 ; AVX512BW-NEXT: vmovdqa64 %zmm13, 256(%rax)
2795 ; AVX512BW-NEXT: vmovdqa64 %zmm12, 192(%rax)
2796 ; AVX512BW-NEXT: vmovdqa64 %zmm10, 128(%rax)
2797 ; AVX512BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
2798 ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%rax)
2799 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 384(%rax)
2800 ; AVX512BW-NEXT: vzeroupper
2801 ; AVX512BW-NEXT: retq
2802 %in.vec0 = load <16 x i32>, ptr %in.vecptr0, align 64
2803 %in.vec1 = load <16 x i32>, ptr %in.vecptr1, align 64
2804 %in.vec2 = load <16 x i32>, ptr %in.vecptr2, align 64
2805 %in.vec3 = load <16 x i32>, ptr %in.vecptr3, align 64
2806 %in.vec4 = load <16 x i32>, ptr %in.vecptr4, align 64
2807 %in.vec5 = load <16 x i32>, ptr %in.vecptr5, align 64
2808 %in.vec6 = load <16 x i32>, ptr %in.vecptr6, align 64
2809 %1 = shufflevector <16 x i32> %in.vec0, <16 x i32> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2810 %2 = shufflevector <16 x i32> %in.vec2, <16 x i32> %in.vec3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2811 %3 = shufflevector <16 x i32> %in.vec4, <16 x i32> %in.vec5, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2812 %4 = shufflevector <32 x i32> %1, <32 x i32> %2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
2813 %5 = shufflevector <16 x i32> %in.vec6, <16 x i32> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2814 %6 = shufflevector <32 x i32> %3, <32 x i32> %5, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
2815 %7 = shufflevector <48 x i32> %6, <48 x i32> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2816 %8 = shufflevector <64 x i32> %4, <64 x i32> %7, <112 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111>
2817 %interleaved.vec = shufflevector <112 x i32> %8, <112 x i32> poison, <112 x i32> <i32 0, i32 16, i32 32, i32 48, i32 64, i32 80, i32 96, i32 1, i32 17, i32 33, i32 49, i32 65, i32 81, i32 97, i32 2, i32 18, i32 34, i32 50, i32 66, i32 82, i32 98, i32 3, i32 19, i32 35, i32 51, i32 67, i32 83, i32 99, i32 4, i32 20, i32 36, i32 52, i32 68, i32 84, i32 100, i32 5, i32 21, i32 37, i32 53, i32 69, i32 85, i32 101, i32 6, i32 22, i32 38, i32 54, i32 70, i32 86, i32 102, i32 7, i32 23, i32 39, i32 55, i32 71, i32 87, i32 103, i32 8, i32 24, i32 40, i32 56, i32 72, i32 88, i32 104, i32 9, i32 25, i32 41, i32 57, i32 73, i32 89, i32 105, i32 10, i32 26, i32 42, i32 58, i32 74, i32 90, i32 106, i32 11, i32 27, i32 43, i32 59, i32 75, i32 91, i32 107, i32 12, i32 28, i32 44, i32 60, i32 76, i32 92, i32 108, i32 13, i32 29, i32 45, i32 61, i32 77, i32 93, i32 109, i32 14, i32 30, i32 46, i32 62, i32 78, i32 94, i32 110, i32 15, i32 31, i32 47, i32 63, i32 79, i32 95, i32 111>
2818 store <112 x i32> %interleaved.vec, ptr %out.vec, align 64
2819 ret void
2820 }
2822 define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
2823 ; SSE-LABEL: store_i32_stride7_vf32:
2824 ; SSE: # %bb.0:
2825 ; SSE-NEXT: subq $1256, %rsp # imm = 0x4E8
2826 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2827 ; SSE-NEXT: movdqa (%rdi), %xmm4
2828 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2829 ; SSE-NEXT: movdqa (%rsi), %xmm7
2830 ; SSE-NEXT: movdqa 16(%rsi), %xmm5
2831 ; SSE-NEXT: movaps (%rdx), %xmm9
2832 ; SSE-NEXT: movdqa 16(%rdx), %xmm12
2833 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2834 ; SSE-NEXT: movaps (%rcx), %xmm10
2835 ; SSE-NEXT: movaps 16(%rcx), %xmm13
2836 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2837 ; SSE-NEXT: movaps (%r8), %xmm0
2838 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2839 ; SSE-NEXT: movaps 16(%r8), %xmm14
2840 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2841 ; SSE-NEXT: movdqa (%r9), %xmm2
2842 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2843 ; SSE-NEXT: movdqa 16(%r9), %xmm15
2844 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2845 ; SSE-NEXT: movdqa (%rax), %xmm11
2846 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm10[1,1]
2847 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,1,1]
2848 ; SSE-NEXT: movaps %xmm9, %xmm3
2849 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
2850 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
2851 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2852 ; SSE-NEXT: movdqa %xmm4, %xmm0
2853 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
2854 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2855 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[1,1,1,1]
2856 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2857 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
2858 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2859 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
2860 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2861 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,1,1]
2862 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2863 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,1,1]
2864 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2865 ; SSE-NEXT: movaps %xmm14, %xmm0
2866 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm13[1,1]
2867 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2868 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2869 ; SSE-NEXT: movdqa 16(%rax), %xmm0
2870 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2871 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2872 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[1,1,1,1]
2873 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2874 ; SSE-NEXT: movdqa 16(%rdi), %xmm14
2875 ; SSE-NEXT: movdqa %xmm14, %xmm0
2876 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
2877 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2878 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2879 ; SSE-NEXT: movdqa 32(%rsi), %xmm1
2880 ; SSE-NEXT: movaps 32(%rdx), %xmm2
2881 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2882 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
2883 ; SSE-NEXT: movdqa %xmm1, %xmm8
2884 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2885 ; SSE-NEXT: movaps %xmm2, %xmm1
2886 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
2887 ; SSE-NEXT: movaps 32(%rcx), %xmm2
2888 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2889 ; SSE-NEXT: movaps 32(%r8), %xmm0
2890 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2891 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
2892 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2893 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2894 ; SSE-NEXT: movdqa 32(%r9), %xmm1
2895 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2896 ; SSE-NEXT: movdqa 32(%rax), %xmm0
2897 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2898 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2899 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
2900 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2901 ; SSE-NEXT: movdqa 32(%rdi), %xmm6
2902 ; SSE-NEXT: movdqa %xmm6, %xmm0
2903 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
2904 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2905 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2906 ; SSE-NEXT: movdqa 48(%rsi), %xmm8
2907 ; SSE-NEXT: movdqa 48(%rdx), %xmm0
2908 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2909 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2910 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,1,1]
2911 ; SSE-NEXT: movdqa %xmm8, %xmm12
2912 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2913 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2914 ; SSE-NEXT: movaps 48(%rcx), %xmm13
2915 ; SSE-NEXT: movaps 48(%r8), %xmm0
2916 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2917 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm13[1,1]
2918 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2919 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2920 ; SSE-NEXT: movdqa 48(%r9), %xmm1
2921 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2922 ; SSE-NEXT: movdqa 48(%rax), %xmm0
2923 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2924 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2925 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
2926 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2927 ; SSE-NEXT: movdqa 48(%rdi), %xmm8
2928 ; SSE-NEXT: movdqa %xmm8, %xmm0
2929 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3]
2930 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2931 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2932 ; SSE-NEXT: movdqa 64(%rsi), %xmm1
2933 ; SSE-NEXT: movaps 64(%rdx), %xmm2
2934 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2935 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
2936 ; SSE-NEXT: movdqa %xmm1, %xmm12
2937 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2938 ; SSE-NEXT: movaps %xmm2, %xmm1
2939 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
2940 ; SSE-NEXT: movaps 64(%rcx), %xmm2
2941 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2942 ; SSE-NEXT: movaps 64(%r8), %xmm0
2943 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2944 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
2945 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2946 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2947 ; SSE-NEXT: movdqa 64(%r9), %xmm1
2948 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2949 ; SSE-NEXT: movdqa 64(%rax), %xmm0
2950 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2951 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2952 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
2953 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2954 ; SSE-NEXT: movdqa 64(%rdi), %xmm0
2955 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2956 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3]
2957 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2958 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2959 ; SSE-NEXT: movdqa 80(%rsi), %xmm12
2960 ; SSE-NEXT: movdqa 80(%rdx), %xmm0
2961 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2962 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2963 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[1,1,1,1]
2964 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2965 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2966 ; SSE-NEXT: movaps 80(%rcx), %xmm2
2967 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2968 ; SSE-NEXT: movaps 80(%r8), %xmm0
2969 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2970 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
2971 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2972 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2973 ; SSE-NEXT: movdqa 80(%r9), %xmm1
2974 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2975 ; SSE-NEXT: movdqa 80(%rax), %xmm0
2976 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2977 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2978 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
2979 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2980 ; SSE-NEXT: movdqa 80(%rdi), %xmm0
2981 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2982 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3]
2983 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2984 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2985 ; SSE-NEXT: movdqa 96(%rsi), %xmm4
2986 ; SSE-NEXT: movaps 96(%rdx), %xmm12
2987 ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2988 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
2989 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2990 ; SSE-NEXT: movaps %xmm12, %xmm1
2991 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
2992 ; SSE-NEXT: movaps 96(%rcx), %xmm3
2993 ; SSE-NEXT: movaps 96(%r8), %xmm15
2994 ; SSE-NEXT: movaps %xmm15, %xmm0
2995 ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2996 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm3[1,1]
2997 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2998 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
2999 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3000 ; SSE-NEXT: movdqa 96(%r9), %xmm2
3001 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3002 ; SSE-NEXT: movdqa 96(%rax), %xmm12
3003 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3004 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,1,1]
3005 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
3006 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3007 ; SSE-NEXT: movdqa 96(%rdi), %xmm0
3008 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3009 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
3010 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
3011 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3012 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[3,3,3,3]
3013 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[3,3,3,3]
3014 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3015 ; SSE-NEXT: movdqa %xmm12, %xmm0
3016 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm2[3,3]
3017 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
3018 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3019 ; SSE-NEXT: movdqa 112(%rsi), %xmm1
3020 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3021 ; SSE-NEXT: movdqa 112(%rdx), %xmm0
3022 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3023 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
3024 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
3025 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
3026 ; SSE-NEXT: movaps 112(%rcx), %xmm3
3027 ; SSE-NEXT: movaps 112(%r8), %xmm0
3028 ; SSE-NEXT: movaps %xmm0, %xmm1
3029 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[1,1]
3030 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3031 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
3032 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3033 ; SSE-NEXT: movaps %xmm0, %xmm1
3034 ; SSE-NEXT: movaps 112(%r9), %xmm15
3035 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm15[0]
3036 ; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
3037 ; SSE-NEXT: movaps %xmm15, %xmm1
3038 ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3039 ; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm0[1]
3040 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
3041 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,3,3,3]
3042 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
3043 ; SSE-NEXT: movaps 112(%rax), %xmm12
3044 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm12[3,3]
3045 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
3046 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3047 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3048 ; SSE-NEXT: movaps %xmm9, %xmm0
3049 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3050 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
3051 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3052 ; SSE-NEXT: movdqa %xmm2, %xmm1
3053 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
3054 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
3055 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3056 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm2[1,3]
3057 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3058 ; SSE-NEXT: movaps %xmm3, %xmm1
3059 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3060 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
3061 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm11[0,2]
3062 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3063 ; SSE-NEXT: movaps %xmm3, %xmm0
3064 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
3065 ; SSE-NEXT: movaps %xmm9, %xmm1
3066 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
3067 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
3068 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3069 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3070 ; SSE-NEXT: movaps %xmm1, %xmm0
3071 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3072 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm5[0]
3073 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3074 ; SSE-NEXT: movdqa %xmm14, %xmm2
3075 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
3076 ; SSE-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
3077 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
3078 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3079 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
3080 ; SSE-NEXT: # xmm14 = xmm14[1,1],mem[0,3]
3081 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3082 ; SSE-NEXT: movaps %xmm2, %xmm3
3083 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3084 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
3085 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm14[2,0]
3086 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3087 ; SSE-NEXT: movaps %xmm5, %xmm3
3088 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
3089 ; SSE-NEXT: movaps %xmm2, %xmm0
3090 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
3091 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
3092 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3093 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3094 ; SSE-NEXT: movdqa %xmm2, %xmm0
3095 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3096 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
3097 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3098 ; SSE-NEXT: movdqa %xmm6, %xmm3
3099 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
3100 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
3101 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
3102 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3103 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3104 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[1,3]
3105 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3106 ; SSE-NEXT: movaps %xmm5, %xmm4
3107 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3108 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
3109 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0,2]
3110 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3111 ; SSE-NEXT: movaps %xmm5, %xmm0
3112 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
3113 ; SSE-NEXT: movdqa %xmm2, %xmm3
3114 ; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
3115 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
3116 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3117 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3118 ; SSE-NEXT: movaps %xmm13, %xmm0
3119 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3120 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
3121 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3122 ; SSE-NEXT: movdqa %xmm8, %xmm3
3123 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
3124 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
3125 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
3126 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3127 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
3128 ; SSE-NEXT: # xmm8 = xmm8[1,1],mem[0,3]
3129 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3130 ; SSE-NEXT: movaps %xmm5, %xmm4
3131 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3132 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
3133 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,0]
3134 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3135 ; SSE-NEXT: movaps %xmm1, %xmm4
3136 ; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm13[2],xmm4[3],xmm13[3]
3137 ; SSE-NEXT: movaps %xmm5, %xmm0
3138 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
3139 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
3140 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3141 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3142 ; SSE-NEXT: movaps %xmm3, %xmm0
3143 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3144 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
3145 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3146 ; SSE-NEXT: movaps %xmm1, %xmm4
3147 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
3148 ; SSE-NEXT: # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1]
3149 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
3150 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3151 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3152 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
3153 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3154 ; SSE-NEXT: movaps %xmm1, %xmm5
3155 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3156 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
3157 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[0,2]
3158 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3159 ; SSE-NEXT: movaps %xmm1, %xmm0
3160 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
3161 ; SSE-NEXT: movaps %xmm3, %xmm1
3162 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
3163 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
3164 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3165 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3166 ; SSE-NEXT: movaps %xmm3, %xmm0
3167 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
3168 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm10[0]
3169 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3170 ; SSE-NEXT: movaps %xmm1, %xmm13
3171 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
3172 ; SSE-NEXT: # xmm13 = xmm13[0],mem[0],xmm13[1],mem[1]
3173 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm0[2,0]
3174 ; SSE-NEXT: movaps %xmm1, %xmm0
3175 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3176 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
3177 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3178 ; SSE-NEXT: movaps %xmm1, %xmm11
3179 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3180 ; SSE-NEXT: unpcklps {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
3181 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm0[2,0]
3182 ; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm3[2],xmm10[3],xmm3[3]
3183 ; SSE-NEXT: movaps %xmm1, %xmm0
3184 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
3185 ; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm0[0]
3186 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
3187 ; SSE-NEXT: movaps %xmm7, %xmm0
3188 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3189 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
3190 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
3191 ; SSE-NEXT: movaps %xmm14, %xmm9
3192 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
3193 ; SSE-NEXT: # xmm9 = xmm9[0],mem[0],xmm9[1],mem[1]
3194 ; SSE-NEXT: movlhps {{.*#+}} xmm9 = xmm9[0],xmm0[0]
3195 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3196 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm14[1,3]
3197 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3198 ; SSE-NEXT: movaps %xmm1, %xmm8
3199 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3200 ; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
3201 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[0,2]
3202 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
3203 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm3[2],xmm7[3],xmm3[3]
3204 ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm1[0]
3205 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3206 ; SSE-NEXT: movaps %xmm2, %xmm5
3207 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3208 ; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
3209 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
3210 ; SSE-NEXT: movaps 112(%rdi), %xmm3
3211 ; SSE-NEXT: movaps %xmm3, %xmm4
3212 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3213 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
3214 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,0]
3215 ; SSE-NEXT: movaps %xmm3, %xmm0
3216 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm12[0,3]
3217 ; SSE-NEXT: movaps (%rsp), %xmm6 # 16-byte Reload
3218 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[2,0]
3219 ; SSE-NEXT: movaps %xmm6, (%rsp) # 16-byte Spill
3220 ; SSE-NEXT: movaps %xmm3, %xmm0
3221 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3222 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3223 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm12[1,1]
3224 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[0,1]
3225 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3226 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm15[2,0]
3227 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
3228 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm12[2,3]
3229 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[2,0]
3230 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3231 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3232 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3233 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3234 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
3235 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3236 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,3,2,3]
3237 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
3238 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3239 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
3240 ; SSE-NEXT: # xmm15 = xmm15[3,3],mem[3,3]
3241 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3242 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm15[2,0]
3243 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3244 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3245 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
3246 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3247 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3248 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3249 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3250 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3251 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[2,0]
3252 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3253 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3254 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3255 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3256 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3257 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3258 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3259 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,0]
3260 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3261 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3262 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3263 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3264 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3265 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3266 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3267 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3268 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[2,0]
3269 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3270 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3271 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3272 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3273 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3274 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3275 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3276 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,0]
3277 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3278 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3279 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3280 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3281 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3282 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3283 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3284 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3285 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[2,0]
3286 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3287 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3288 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3289 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3290 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3291 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3292 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3293 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,3],xmm1[2,0]
3294 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3295 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3296 ; SSE-NEXT: movss {{.*#+}} xmm15 = xmm0[0],xmm15[1,2,3]
3297 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3298 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3299 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3300 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
3301 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,3],xmm0[2,0]
3302 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3303 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3304 ; SSE-NEXT: movss {{.*#+}} xmm12 = xmm0[0],xmm12[1,2,3]
3305 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3306 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3307 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3308 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm1[2,0]
3309 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3310 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3311 ; SSE-NEXT: movss {{.*#+}} xmm6 = xmm0[0],xmm6[1,2,3]
3312 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3313 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3314 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3315 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3316 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[2,0]
3317 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3318 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
3319 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
3320 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3321 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
3322 ; SSE-NEXT: movaps %xmm1, %xmm0
3323 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3324 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
3325 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3326 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3327 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
3328 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3329 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3330 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3331 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm0[2,0]
3332 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3333 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
3334 ; SSE-NEXT: movss {{.*#+}} xmm14 = xmm0[0],xmm14[1,2,3]
3335 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
3336 ; SSE-NEXT: movaps %xmm3, 864(%rax)
3337 ; SSE-NEXT: movaps %xmm5, 848(%rax)
3338 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3339 ; SSE-NEXT: movaps %xmm0, 832(%rax)
3340 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
3341 ; SSE-NEXT: movaps %xmm0, 800(%rax)
3342 ; SSE-NEXT: movaps %xmm4, 784(%rax)
3343 ; SSE-NEXT: movaps %xmm7, 736(%rax)
3344 ; SSE-NEXT: movaps %xmm8, 688(%rax)
3345 ; SSE-NEXT: movaps %xmm9, 672(%rax)
3346 ; SSE-NEXT: movaps %xmm10, 624(%rax)
3347 ; SSE-NEXT: movaps %xmm11, 576(%rax)
3348 ; SSE-NEXT: movaps %xmm13, 560(%rax)
3349 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3350 ; SSE-NEXT: movaps %xmm0, 512(%rax)
3351 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3352 ; SSE-NEXT: movaps %xmm0, 464(%rax)
3353 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3354 ; SSE-NEXT: movaps %xmm0, 448(%rax)
3355 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3356 ; SSE-NEXT: movaps %xmm0, 400(%rax)
3357 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3358 ; SSE-NEXT: movaps %xmm0, 352(%rax)
3359 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3360 ; SSE-NEXT: movaps %xmm0, 336(%rax)
3361 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3362 ; SSE-NEXT: movaps %xmm0, 288(%rax)
3363 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3364 ; SSE-NEXT: movaps %xmm0, 240(%rax)
3365 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3366 ; SSE-NEXT: movaps %xmm0, 224(%rax)
3367 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3368 ; SSE-NEXT: movaps %xmm0, 176(%rax)
3369 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3370 ; SSE-NEXT: movaps %xmm0, 128(%rax)
3371 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3372 ; SSE-NEXT: movaps %xmm0, 112(%rax)
3373 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3374 ; SSE-NEXT: movaps %xmm0, 64(%rax)
3375 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3376 ; SSE-NEXT: movaps %xmm0, 16(%rax)
3377 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3378 ; SSE-NEXT: movaps %xmm0, (%rax)
3379 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3380 ; SSE-NEXT: movaps %xmm0, 880(%rax)
3381 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3382 ; SSE-NEXT: movaps %xmm0, 816(%rax)
3383 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3384 ; SSE-NEXT: movaps %xmm0, 768(%rax)
3385 ; SSE-NEXT: movaps %xmm14, 752(%rax)
3386 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3387 ; SSE-NEXT: movaps %xmm0, 720(%rax)
3388 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3389 ; SSE-NEXT: movaps %xmm0, 704(%rax)
3390 ; SSE-NEXT: movaps %xmm1, 656(%rax)
3391 ; SSE-NEXT: movaps %xmm2, 640(%rax)
3392 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3393 ; SSE-NEXT: movaps %xmm0, 608(%rax)
3394 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3395 ; SSE-NEXT: movaps %xmm0, 592(%rax)
3396 ; SSE-NEXT: movaps %xmm6, 544(%rax)
3397 ; SSE-NEXT: movaps %xmm12, 528(%rax)
3398 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3399 ; SSE-NEXT: movaps %xmm0, 496(%rax)
3400 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3401 ; SSE-NEXT: movaps %xmm0, 480(%rax)
3402 ; SSE-NEXT: movaps %xmm15, 432(%rax)
3403 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3404 ; SSE-NEXT: movaps %xmm0, 416(%rax)
3405 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3406 ; SSE-NEXT: movaps %xmm0, 384(%rax)
3407 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3408 ; SSE-NEXT: movaps %xmm0, 368(%rax)
3409 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3410 ; SSE-NEXT: movaps %xmm0, 320(%rax)
3411 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3412 ; SSE-NEXT: movaps %xmm0, 304(%rax)
3413 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3414 ; SSE-NEXT: movaps %xmm0, 272(%rax)
3415 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3416 ; SSE-NEXT: movaps %xmm0, 256(%rax)
3417 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3418 ; SSE-NEXT: movaps %xmm0, 208(%rax)
3419 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3420 ; SSE-NEXT: movaps %xmm0, 192(%rax)
3421 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3422 ; SSE-NEXT: movaps %xmm0, 160(%rax)
3423 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3424 ; SSE-NEXT: movaps %xmm0, 144(%rax)
3425 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3426 ; SSE-NEXT: movaps %xmm0, 96(%rax)
3427 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3428 ; SSE-NEXT: movaps %xmm0, 80(%rax)
3429 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3430 ; SSE-NEXT: movaps %xmm0, 48(%rax)
3431 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3432 ; SSE-NEXT: movaps %xmm0, 32(%rax)
3433 ; SSE-NEXT: addq $1256, %rsp # imm = 0x4E8
3434 ; SSE-NEXT: retq
3436 ; AVX1-ONLY-LABEL: store_i32_stride7_vf32:
3437 ; AVX1-ONLY: # %bb.0:
3438 ; AVX1-ONLY-NEXT: subq $1656, %rsp # imm = 0x678
3439 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
3440 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm2
3441 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3442 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %ymm4
3443 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3444 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %ymm0
3445 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3446 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %ymm1
3447 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3448 ; AVX1-ONLY-NEXT: vmovaps 96(%rax), %ymm3
3449 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3450 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
3451 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm4[1],ymm2[1],ymm4[3],ymm2[3]
3452 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
3453 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
3454 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
3455 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
3456 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
3457 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
3458 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm0[2,3]
3459 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
3460 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3461 ; AVX1-ONLY-NEXT: vmovaps (%rax), %xmm0
3462 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3463 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3464 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm3
3465 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3466 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm4
3467 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3468 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm2
3469 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm5
3470 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm5[0],xmm2[0]
3471 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,1]
3472 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm6
3473 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3474 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3475 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm7
3476 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm8
3477 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
3478 ; AVX1-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3479 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3480 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,0,1]
3481 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
3482 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
3483 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
3484 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[2],ymm0[2]
3485 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6],ymm1[7]
3486 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3487 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm3[1,1],xmm4[1,1]
3488 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3489 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
3490 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3491 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm5[1]
3492 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1],xmm1[0,2]
3493 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3494 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm8[1],xmm7[1],zero
3495 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
3496 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3497 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3498 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm1
3499 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3500 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm0
3501 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3502 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
3503 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3504 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm2
3505 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3506 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm1
3507 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3508 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
3509 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
3510 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm2
3511 ; AVX1-ONLY-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
3512 ; AVX1-ONLY-NEXT: vmovaps (%r9), %ymm1
3513 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3514 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
3515 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
3516 ; AVX1-ONLY-NEXT: vmovaps (%rax), %ymm2
3517 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3518 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
3519 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3520 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
3521 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
3522 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3523 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1
3524 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm5
3525 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm5[0],xmm1[0]
3526 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
3527 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm7
3528 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3529 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3530 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %xmm2
3531 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %xmm3
3532 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
3533 ; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm6
3534 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3535 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm8
3536 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3537 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
3538 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
3539 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %xmm3
3540 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3541 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %xmm4
3542 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3543 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
3544 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3545 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %xmm2
3546 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3547 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
3548 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
3549 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
3550 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3551 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
3552 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3553 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
3554 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3555 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm5[1]
3556 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1],xmm1[0,2]
3557 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3558 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm6[1],xmm8[1],zero
3559 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
3560 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3561 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3562 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm0
3563 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3564 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %ymm1
3565 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3566 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,1],ymm0[1,1],ymm1[5,5],ymm0[5,5]
3567 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3568 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %ymm11
3569 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %ymm1
3570 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3571 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm11[1,1],ymm1[1,1],ymm11[5,5],ymm1[5,5]
3572 ; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3573 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
3574 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %ymm2
3575 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3576 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %ymm1
3577 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3578 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
3579 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
3580 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %ymm2
3581 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3582 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
3583 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3584 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
3585 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
3586 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3587 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm1
3588 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %xmm6
3589 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm6[0],xmm1[0]
3590 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
3591 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm7
3592 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3593 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3594 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %xmm3
3595 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %xmm5
3596 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
3597 ; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm8
3598 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3599 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3600 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
3601 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
3602 ; AVX1-ONLY-NEXT: vmovaps 64(%r9), %xmm3
3603 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3604 ; AVX1-ONLY-NEXT: vmovaps 64(%r8), %xmm4
3605 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3606 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
3607 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3608 ; AVX1-ONLY-NEXT: vmovaps 64(%rax), %xmm2
3609 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3610 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
3611 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
3612 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
3613 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3614 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
3615 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3616 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
3617 ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3618 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm6[1]
3619 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm6[1,1],xmm1[0,2]
3620 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3621 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm5[1],xmm8[1],zero
3622 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
3623 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3624 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3625 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm13
3626 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %ymm14
3627 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm14[1,1],ymm13[1,1],ymm14[5,5],ymm13[5,5]
3628 ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3629 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3630 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3631 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %ymm9
3632 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %ymm8
3633 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm9[1,1],ymm8[1,1],ymm9[5,5],ymm8[5,5]
3634 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3635 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3636 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
3637 ; AVX1-ONLY-NEXT: vmovaps 64(%r8), %ymm10
3638 ; AVX1-ONLY-NEXT: vmovaps 64(%r9), %ymm12
3639 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm12[0],ymm10[0],ymm12[2],ymm10[2]
3640 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm10[2,1],ymm1[6,4],ymm10[6,5]
3641 ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3642 ; AVX1-ONLY-NEXT: vmovaps 64(%rax), %ymm2
3643 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3644 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
3645 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3646 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
3647 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
3648 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3649 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm7
3650 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %xmm2
3651 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm2[0],xmm7[0]
3652 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm7[2,1]
3653 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3654 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3655 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %xmm4
3656 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %xmm3
3657 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm15 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
3658 ; AVX1-ONLY-NEXT: vmovaps %xmm4, %xmm6
3659 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3660 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3661 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,0,1]
3662 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
3663 ; AVX1-ONLY-NEXT: vmovaps 96(%r9), %xmm5
3664 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3665 ; AVX1-ONLY-NEXT: vmovaps 96(%r8), %xmm4
3666 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3667 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm15 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
3668 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
3669 ; AVX1-ONLY-NEXT: vinsertf128 $1, 96(%rax), %ymm0, %ymm0
3670 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm15[0],ymm0[0],ymm15[2],ymm0[2]
3671 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6],ymm1[7]
3672 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3673 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1],xmm4[1,1]
3674 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3675 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
3676 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3677 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm2[1]
3678 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,1],xmm1[0,2]
3679 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
3680 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm15 = zero,xmm3[1],xmm6[1],zero
3681 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1,2],ymm1[3,4,5,6,7]
3682 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
3683 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3684 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3685 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3686 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
3687 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
3688 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3689 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
3690 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[6],ymm3[6],ymm4[7],ymm3[7]
3691 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
3692 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3693 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
3694 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
3695 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
3696 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3697 ; AVX1-ONLY-NEXT: vmovaps 16(%rax), %xmm15
3698 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
3699 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
3700 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3701 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3702 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3703 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
3704 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
3705 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3706 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm11[2],ymm5[2],ymm11[3],ymm5[3],ymm11[6],ymm5[6],ymm11[7],ymm5[7]
3707 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
3708 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
3709 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3710 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm11[1],ymm1[3],ymm11[3]
3711 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm11[1,1],ymm1[0,2],ymm11[5,5],ymm1[4,6]
3712 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3713 ; AVX1-ONLY-NEXT: vmovaps 48(%rax), %xmm15
3714 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
3715 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
3716 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3717 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm14[1],ymm13[1],ymm14[3],ymm13[3]
3718 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
3719 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[6],ymm8[6],ymm9[7],ymm8[7]
3720 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
3721 ; AVX1-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3722 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm10[1],ymm12[1],ymm10[3],ymm12[3]
3723 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,1],ymm1[0,2],ymm12[5,5],ymm1[4,6]
3724 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3725 ; AVX1-ONLY-NEXT: vmovaps 80(%rax), %xmm15
3726 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
3727 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
3728 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3729 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
3730 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
3731 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm14[3,3],ymm7[3,3],ymm14[7,7],ymm7[7,7]
3732 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3733 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3734 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3735 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm6[3,3],ymm2[3,3],ymm6[7,7],ymm2[7,7]
3736 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3737 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3738 ; AVX1-ONLY-NEXT: vbroadcastss 124(%r8), %ymm1
3739 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
3740 ; AVX1-ONLY-NEXT: vbroadcastss 124(%r9), %ymm1
3741 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
3742 ; AVX1-ONLY-NEXT: vbroadcastsd 120(%rax), %ymm1
3743 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
3744 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3745 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm14[0],ymm7[2],ymm14[2]
3746 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm14[3,1],ymm0[0,2],ymm14[7,5],ymm0[4,6]
3747 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm6[0],ymm2[0],ymm6[1],ymm2[1],ymm6[4],ymm2[4],ymm6[5],ymm2[5]
3748 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
3749 ; AVX1-ONLY-NEXT: vbroadcastss 108(%r8), %ymm1
3750 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
3751 ; AVX1-ONLY-NEXT: vbroadcastss 108(%r9), %xmm1
3752 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
3753 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3754 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
3755 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3756 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1],ymm14[1,1],ymm7[5,5],ymm14[5,5]
3757 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm6[1,1],ymm2[5,5],ymm6[5,5]
3758 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3759 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
3760 ; AVX1-ONLY-NEXT: vbroadcastsd 112(%r8), %ymm1
3761 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
3762 ; AVX1-ONLY-NEXT: vbroadcastss 112(%r9), %xmm1
3763 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
3764 ; AVX1-ONLY-NEXT: vbroadcastss 112(%rax), %ymm1
3765 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
3766 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3767 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3768 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3769 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
3770 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3771 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm1 # 16-byte Folded Reload
3772 ; AVX1-ONLY-NEXT: # xmm1 = xmm6[2],mem[2],xmm6[3],mem[3]
3773 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm1, %ymm1
3774 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3775 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
3776 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3777 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
3778 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
3779 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
3780 ; AVX1-ONLY-NEXT: vbroadcastsd 8(%rax), %ymm15
3781 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5,6,7]
3782 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
3783 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3784 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm3[3,3],ymm4[3,3],ymm3[7,7],ymm4[7,7]
3785 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
3786 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3787 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3788 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
3789 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3790 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3791 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
3792 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3793 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
3794 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3795 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
3796 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
3797 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
3798 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
3799 ; AVX1-ONLY-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
3800 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3801 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
3802 ; AVX1-ONLY-NEXT: # xmm1 = xmm0[3,3],mem[3,3]
3803 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3804 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm15 # 16-byte Folded Reload
3805 ; AVX1-ONLY-NEXT: # xmm15 = xmm0[2],mem[2],xmm0[3],mem[3]
3806 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm15, %ymm13
3807 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3808 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3,4],ymm1[5,6],ymm13[7]
3809 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
3810 ; AVX1-ONLY-NEXT: # xmm13 = mem[2,2,2,2]
3811 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
3812 ; AVX1-ONLY-NEXT: # xmm13 = mem[0,1,2],xmm13[3]
3813 ; AVX1-ONLY-NEXT: vbroadcastsd 40(%rax), %ymm15
3814 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
3815 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm13[2,3,4],ymm1[5,6,7]
3816 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3817 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
3818 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm5[3,3],ymm14[3,3],ymm5[7,7],ymm14[7,7]
3819 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm13[2,3,2,3]
3820 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
3821 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
3822 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm12[3,3],ymm7[3,3],ymm12[7,7],ymm7[7,7]
3823 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm15, %xmm15
3824 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
3825 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3826 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm0[3,3],ymm11[3,3],ymm0[7,7],ymm11[7,7]
3827 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3828 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm0[2,3],ymm11[1,2],ymm0[6,7],ymm11[5,6]
3829 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm11[2,3,2,3]
3830 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,2,3,1,4,6,7,5]
3831 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0],ymm13[1,2,3,4],ymm11[5,6,7]
3832 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3833 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3834 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
3835 ; AVX1-ONLY-NEXT: # xmm9 = xmm0[3,3],mem[3,3]
3836 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3837 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload
3838 ; AVX1-ONLY-NEXT: # xmm10 = xmm0[2],mem[2],xmm0[3],mem[3]
3839 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm10, %ymm8
3840 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
3841 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6],ymm8[7]
3842 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3843 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm13[2,2,2,2]
3844 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3845 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm15[0,1,2],xmm9[3]
3846 ; AVX1-ONLY-NEXT: vbroadcastsd 72(%rax), %ymm10
3847 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
3848 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm8[0,1],ymm9[2,3,4],ymm8[5,6,7]
3849 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
3850 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3851 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm6[3,3],ymm8[3,3],ymm6[7,7],ymm8[7,7]
3852 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
3853 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3854 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3855 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm1[3,3],ymm0[3,3],ymm1[7,7],ymm0[7,7]
3856 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm10, %xmm10
3857 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7]
3858 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3859 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
3860 ; AVX1-ONLY-NEXT: # ymm4 = ymm2[3,3],mem[3,3],ymm2[7,7],mem[7,7]
3861 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3862 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm2[2,3],ymm4[1,2],ymm2[6,7],ymm4[5,6]
3863 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
3864 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2,3,1,4,6,7,5]
3865 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm9[1,2,3,4],ymm4[5,6,7]
3866 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3867 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm3 # 16-byte Folded Reload
3868 ; AVX1-ONLY-NEXT: # xmm3 = xmm2[3,3],mem[3,3]
3869 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3870 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
3871 ; AVX1-ONLY-NEXT: # xmm5 = xmm2[2],mem[2],xmm2[3],mem[3]
3872 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
3873 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
3874 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6],ymm2[7]
3875 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
3876 ; AVX1-ONLY-NEXT: # xmm3 = mem[2,2,2,2]
3877 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
3878 ; AVX1-ONLY-NEXT: # xmm3 = mem[0,1,2],xmm3[3]
3879 ; AVX1-ONLY-NEXT: vbroadcastsd 104(%rax), %ymm5
3880 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
3881 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4],ymm2[5,6,7]
3882 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
3883 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
3884 ; AVX1-ONLY-NEXT: # ymm3 = ymm3[0],mem[0],ymm3[1],mem[1],ymm3[4],mem[4],ymm3[5],mem[5]
3885 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
3886 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3887 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm9[0],ymm5[2],ymm9[2]
3888 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm9[3,1],ymm5[0,2],ymm9[7,5],ymm5[4,6]
3889 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5],ymm5[6,7]
3890 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3891 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
3892 ; AVX1-ONLY-NEXT: # xmm5 = xmm5[3,3],mem[3,3]
3893 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
3894 ; AVX1-ONLY-NEXT: # xmm5 = xmm5[0,1,2],mem[3]
3895 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1,2,3],ymm3[4,5,6,7]
3896 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm5 = ymm12[0],ymm7[0],ymm12[1],ymm7[1],ymm12[4],ymm7[4],ymm12[5],ymm7[5]
3897 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
3898 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm14[0],ymm9[0],ymm14[2],ymm9[2]
3899 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm9[3,1],ymm7[0,2],ymm9[7,5],ymm7[4,6]
3900 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5],ymm7[6,7]
3901 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
3902 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
3903 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[3,3],mem[3,3]
3904 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
3905 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[0,1,2],mem[3]
3906 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm7[1,2,3],ymm5[4,5,6,7]
3907 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
3908 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm8[0],ymm6[0],ymm8[2],ymm6[2]
3909 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,1],ymm9[0,2],ymm6[7,5],ymm9[4,6]
3910 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
3911 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm15[3,3],xmm13[3,3]
3912 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
3913 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[0,1,2],mem[3]
3914 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3],ymm6[4,5,6,7]
3915 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
3916 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 544(%rax)
3917 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 320(%rax)
3918 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%rax)
3919 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 736(%rax)
3920 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 640(%rax)
3921 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3922 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 608(%rax)
3923 ; AVX1-ONLY-NEXT: vmovaps %ymm11, 512(%rax)
3924 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3925 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rax)
3926 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3927 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rax)
3928 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3929 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rax)
3930 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
3931 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rax)
3932 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3933 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rax)
3934 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3935 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rax)
3936 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3937 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 800(%rax)
3938 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3939 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 768(%rax)
3940 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3941 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 704(%rax)
3942 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3943 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 672(%rax)
3944 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3945 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 576(%rax)
3946 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3947 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rax)
3948 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3949 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rax)
3950 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3951 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rax)
3952 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3953 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rax)
3954 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3955 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
3956 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3957 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
3958 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3959 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
3960 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3961 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
3962 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3963 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 864(%rax)
3964 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3965 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 832(%rax)
3966 ; AVX1-ONLY-NEXT: addq $1656, %rsp # imm = 0x678
3967 ; AVX1-ONLY-NEXT: vzeroupper
3968 ; AVX1-ONLY-NEXT: retq
3969 ;
3970 ; AVX2-SLOW-LABEL: store_i32_stride7_vf32:
3971 ; AVX2-SLOW: # %bb.0:
3972 ; AVX2-SLOW-NEXT: subq $1320, %rsp # imm = 0x528
3973 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
3974 ; AVX2-SLOW-NEXT: vmovaps (%rax), %xmm0
3975 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3976 ; AVX2-SLOW-NEXT: vmovaps 32(%rax), %xmm3
3977 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3978 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3979 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm14
3980 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %xmm6
3981 ; AVX2-SLOW-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3982 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm8
3983 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %xmm9
3984 ; AVX2-SLOW-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3985 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm8[1,1,1,1]
3986 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
3987 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
3988 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
3989 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm10
3990 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %xmm11
3991 ; AVX2-SLOW-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3992 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm7
3993 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm7[1],xmm10[1],zero
3994 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm5
3995 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %xmm13
3996 ; AVX2-SLOW-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3997 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %xmm4
3998 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %xmm12
3999 ; AVX2-SLOW-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4000 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm4[1,1,2,2]
4001 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3]
4002 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
4003 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
4004 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4005 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4006 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
4007 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm9[1,1,1,1]
4008 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3]
4009 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
4010 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
4011 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm12[1,1,2,2]
4012 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2],xmm1[3]
4013 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4014 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %xmm6
4015 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm6[1],xmm11[1],zero
4016 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
4017 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4018 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4019 ; AVX2-SLOW-NEXT: vmovaps 64(%r8), %xmm1
4020 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4021 ; AVX2-SLOW-NEXT: vmovaps 64(%r9), %xmm0
4022 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4023 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
4024 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
4025 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
4026 ; AVX2-SLOW-NEXT: vmovaps 64(%rax), %xmm1
4027 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4028 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4029 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4030 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %xmm2
4031 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4032 ; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %xmm1
4033 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4034 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
4035 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
4036 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4037 ; AVX2-SLOW-NEXT: vmovaps 64(%rcx), %xmm13
4038 ; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %xmm12
4039 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm12[1],xmm13[1],zero
4040 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
4041 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4042 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4043 ; AVX2-SLOW-NEXT: vmovaps 96(%r8), %xmm1
4044 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4045 ; AVX2-SLOW-NEXT: vmovaps 96(%r9), %xmm11
4046 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm11[1,1,1,1]
4047 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
4048 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
4049 ; AVX2-SLOW-NEXT: vmovaps 96(%rax), %xmm1
4050 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4051 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4052 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4053 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %xmm2
4054 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4055 ; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %xmm1
4056 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4057 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
4058 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
4059 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4060 ; AVX2-SLOW-NEXT: vmovaps 96(%rcx), %xmm3
4061 ; AVX2-SLOW-NEXT: vmovaps %xmm3, (%rsp) # 16-byte Spill
4062 ; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %xmm2
4063 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4064 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
4065 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
4066 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4067 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4068 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
4069 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4070 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm1
4071 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4072 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
4073 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4074 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm2
4075 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4076 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %ymm1
4077 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4078 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
4079 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4080 ; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm2
4081 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4082 ; AVX2-SLOW-NEXT: vmovaps (%r9), %ymm1
4083 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4084 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4085 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4086 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4087 ; AVX2-SLOW-NEXT: vmovaps 16(%rax), %xmm2
4088 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
4089 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4090 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4091 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm1
4092 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4093 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm0
4094 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4095 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
4096 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4097 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm1
4098 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4099 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %ymm2
4100 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4101 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
4102 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4103 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %ymm2
4104 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4105 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %ymm1
4106 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4107 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4108 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4109 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4110 ; AVX2-SLOW-NEXT: vmovaps 48(%rax), %xmm2
4111 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
4112 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4113 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4114 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm1
4115 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4116 ; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %ymm0
4117 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4118 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
4119 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4120 ; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %ymm1
4121 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4122 ; AVX2-SLOW-NEXT: vmovaps 64(%rcx), %ymm2
4123 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4124 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
4125 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4126 ; AVX2-SLOW-NEXT: vmovaps 64(%r8), %ymm2
4127 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4128 ; AVX2-SLOW-NEXT: vmovaps 64(%r9), %ymm1
4129 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4130 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4131 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4132 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4133 ; AVX2-SLOW-NEXT: vmovaps 80(%rax), %xmm2
4134 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
4135 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4136 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4137 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm3
4138 ; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %ymm2
4139 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,1,1,1,5,5,5,5]
4140 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
4141 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4142 ; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %ymm9
4143 ; AVX2-SLOW-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4144 ; AVX2-SLOW-NEXT: vmovaps 96(%rcx), %ymm1
4145 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm9[1,1],ymm1[1,1],ymm9[5,5],ymm1[5,5]
4146 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm15[5,6],ymm0[7]
4147 ; AVX2-SLOW-NEXT: vbroadcastsd 112(%r8), %ymm15
4148 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0],ymm0[1,2,3,4,5,6],ymm15[7]
4149 ; AVX2-SLOW-NEXT: vbroadcastss 112(%r9), %xmm15
4150 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3,4,5,6,7]
4151 ; AVX2-SLOW-NEXT: vbroadcastss 112(%rax), %ymm15
4152 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2],ymm0[3,4,5,6,7]
4153 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4154 ; AVX2-SLOW-NEXT: vbroadcastss %xmm10, %xmm0
4155 ; AVX2-SLOW-NEXT: vbroadcastss %xmm7, %xmm15
4156 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
4157 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm15 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
4158 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,2,2]
4159 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
4160 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3],ymm15[4,5,6,7]
4161 ; AVX2-SLOW-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4162 ; AVX2-SLOW-NEXT: vmovaps %xmm14, %xmm9
4163 ; AVX2-SLOW-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4164 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm8[0],xmm14[1],xmm8[1]
4165 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
4166 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 16-byte Folded Reload
4167 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
4168 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6],ymm0[7]
4169 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4170 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm5[3,3],xmm4[3,3]
4171 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm4 = xmm7[2],xmm10[2],xmm7[3],xmm10[3]
4172 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4173 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
4174 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
4175 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6],ymm4[7]
4176 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm8[2,2,2,2]
4177 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm4 = xmm9[0,1,2],xmm4[3]
4178 ; AVX2-SLOW-NEXT: vbroadcastsd 8(%rax), %ymm5
4179 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
4180 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3,4],ymm0[5,6,7]
4181 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4182 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4183 ; AVX2-SLOW-NEXT: vbroadcastss %xmm8, %xmm0
4184 ; AVX2-SLOW-NEXT: vbroadcastss %xmm6, %xmm4
4185 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
4186 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
4187 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
4188 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm4 = xmm10[0],xmm14[0],xmm10[1],xmm14[1]
4189 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
4190 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
4191 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
4192 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
4193 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
4194 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
4195 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
4196 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 16-byte Folded Reload
4197 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
4198 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6],ymm0[7]
4199 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4200 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm10[3,3],xmm14[3,3]
4201 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm4 = xmm6[2],xmm8[2],xmm6[3],xmm8[3]
4202 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4203 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
4204 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
4205 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6],ymm4[7]
4206 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm9[2,2,2,2]
4207 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm4 = xmm7[0,1,2],xmm4[3]
4208 ; AVX2-SLOW-NEXT: vbroadcastsd 40(%rax), %ymm5
4209 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
4210 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3,4],ymm0[5,6,7]
4211 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4212 ; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm0
4213 ; AVX2-SLOW-NEXT: vbroadcastss %xmm12, %xmm4
4214 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
4215 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4216 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
4217 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm4 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
4218 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
4219 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
4220 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
4221 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4222 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
4223 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
4224 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
4225 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 16-byte Folded Reload
4226 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
4227 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6],ymm0[7]
4228 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4229 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm9[3,3],xmm8[3,3]
4230 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm4 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
4231 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4232 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
4233 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
4234 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6],ymm4[7]
4235 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm6[2,2,2,2]
4236 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm4 = xmm7[0,1,2],xmm4[3]
4237 ; AVX2-SLOW-NEXT: vbroadcastsd 72(%rax), %ymm5
4238 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
4239 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3,4],ymm0[5,6,7]
4240 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4241 ; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm9 # 16-byte Reload
4242 ; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm0
4243 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
4244 ; AVX2-SLOW-NEXT: vbroadcastss %xmm10, %xmm4
4245 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
4246 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
4247 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4248 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm4 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
4249 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
4250 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
4251 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
4252 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4253 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm4 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
4254 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
4255 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 16-byte Folded Reload
4256 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
4257 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6],ymm0[7]
4258 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4259 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm8[3,3],xmm7[3,3]
4260 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm4 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
4261 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4262 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
4263 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
4264 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6],ymm4[7]
4265 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm11[2,2,2,2]
4266 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3]
4267 ; AVX2-SLOW-NEXT: vbroadcastsd 104(%rax), %ymm5
4268 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
4269 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3,4],ymm0[5,6,7]
4270 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4271 ; AVX2-SLOW-NEXT: vbroadcastss 112(%rdx), %ymm0
4272 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm1[3,1,2,0,7,5,6,4]
4273 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6],ymm5[7]
4274 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
4275 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5],ymm0[6,7]
4276 ; AVX2-SLOW-NEXT: vbroadcastss 108(%r8), %ymm5
4277 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4,5,6,7]
4278 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm11[2,2,3,3]
4279 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5,6,7]
4280 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm5 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
4281 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,2]
4282 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
4283 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm6 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
4284 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
4285 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
4286 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm7 = mem[1,2,2,3,5,6,6,7]
4287 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,2,2,2]
4288 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0],ymm5[1,2,3,4,5,6],ymm7[7]
4289 ; AVX2-SLOW-NEXT: vmovaps 96(%rax), %ymm7
4290 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm7[3],ymm0[4,5,6,7]
4291 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4292 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm6[2,3]
4293 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3,4],ymm0[5],ymm5[6,7]
4294 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4295 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[6],ymm4[6],ymm1[7],ymm4[7]
4296 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
4297 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
4298 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
4299 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
4300 ; AVX2-SLOW-NEXT: vbroadcastss 124(%r8), %ymm1
4301 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4302 ; AVX2-SLOW-NEXT: vbroadcastss 124(%r9), %ymm1
4303 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
4304 ; AVX2-SLOW-NEXT: vbroadcastsd 120(%rax), %ymm1
4305 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
4306 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4307 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4308 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,1,1,1,5,5,5,5]
4309 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
4310 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
4311 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
4312 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
4313 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
4314 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm9[1,1],ymm4[1,1],ymm9[5,5],ymm4[5,5]
4315 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6],ymm1[7]
4316 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
4317 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm13[0,0,0,0,4,4,4,4]
4318 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4319 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm11[0,1,0,1,4,5,4,5]
4320 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
4321 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
4322 ; AVX2-SLOW-NEXT: vbroadcastsd 16(%rax), %ymm3
4323 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
4324 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
4325 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4326 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4327 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm14[1,1,1,1,5,5,5,5]
4328 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4329 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3,4],ymm12[5],ymm2[6,7]
4330 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
4331 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4332 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4333 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm0[1,1],ymm10[1,1],ymm0[5,5],ymm10[5,5]
4334 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6],ymm2[7]
4335 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
4336 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,0,0,0,4,4,4,4]
4337 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
4338 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,1,0,1,4,5,4,5]
4339 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0],ymm3[1],ymm6[2,3,4],ymm3[5],ymm6[6,7]
4340 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3]
4341 ; AVX2-SLOW-NEXT: vbroadcastsd 48(%rax), %ymm6
4342 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2,3],ymm3[4,5,6,7]
4343 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm2[3,4,5,6],ymm3[7]
4344 ; AVX2-SLOW-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
4345 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
4346 ; AVX2-SLOW-NEXT: # ymm3 = mem[1,1,1,1,5,5,5,5]
4347 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
4348 ; AVX2-SLOW-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
4349 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,2]
4350 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4351 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4352 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm0[1,1],ymm15[1,1],ymm0[5,5],ymm15[5,5]
4353 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm6[5,6],ymm3[7]
4354 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
4355 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,0,0,0,4,4,4,4]
4356 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
4357 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,1,0,1,4,5,4,5]
4358 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2,3,4],ymm6[5],ymm7[6,7]
4359 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3]
4360 ; AVX2-SLOW-NEXT: vbroadcastsd 80(%rax), %ymm7
4361 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
4362 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm3[3,4,5,6],ymm6[7]
4363 ; AVX2-SLOW-NEXT: vbroadcastss 16(%rdx), %ymm3
4364 ; AVX2-SLOW-NEXT: vmovaps %ymm4, %ymm1
4365 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm4[3,1,2,0,7,5,6,4]
4366 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6],ymm7[7]
4367 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm7 = ymm5[0],ymm8[0],ymm5[1],ymm8[1],ymm5[4],ymm8[4],ymm5[5],ymm8[5]
4368 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm7[4,5],ymm3[6,7]
4369 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4370 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm7 # 16-byte Folded Reload
4371 ; AVX2-SLOW-NEXT: # xmm7 = xmm4[3,3],mem[3,3]
4372 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
4373 ; AVX2-SLOW-NEXT: # xmm7 = xmm7[0,1,2],mem[3]
4374 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm3[0],ymm7[1,2,3],ymm3[4,5,6,7]
4375 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm3 = ymm1[2],ymm9[2],ymm1[3],ymm9[3],ymm1[6],ymm9[6],ymm1[7],ymm9[7]
4376 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm8 = ymm8[2],ymm5[2],ymm8[3],ymm5[3],ymm8[6],ymm5[6],ymm8[7],ymm5[7]
4377 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[3,3,3,3]
4378 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[3,3,3,3]
4379 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0,1,2],ymm3[3,4,5,6,7]
4380 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm11[3,3],ymm13[3,3],ymm11[7,7],ymm13[7,7]
4381 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm9 = mem[2,3,2,3,6,7,6,7]
4382 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1,2],ymm9[3,4],ymm8[5,6],ymm9[7]
4383 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,2,3]
4384 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm3[1,2,3,4],ymm8[5,6,7]
4385 ; AVX2-SLOW-NEXT: vbroadcastss 48(%rdx), %ymm3
4386 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm9 = ymm10[3,1,2,0,7,5,6,4]
4387 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm9[0,1,2,3,4,5],ymm3[6],ymm9[7]
4388 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm9 = ymm12[0],ymm14[0],ymm12[1],ymm14[1],ymm12[4],ymm14[4],ymm12[5],ymm14[5]
4389 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm9[4,5],ymm3[6,7]
4390 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4391 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm9 # 16-byte Folded Reload
4392 ; AVX2-SLOW-NEXT: # xmm9 = xmm2[3,3],mem[3,3]
4393 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
4394 ; AVX2-SLOW-NEXT: # xmm9 = xmm9[0,1,2],mem[3]
4395 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm3[0],ymm9[1,2,3],ymm3[4,5,6,7]
4396 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm3 # 32-byte Folded Reload
4397 ; AVX2-SLOW-NEXT: # ymm3 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
4398 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm11 = ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[6],ymm12[6],ymm14[7],ymm12[7]
4399 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[3,3,3,3]
4400 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[3,3,3,3]
4401 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1,2],ymm3[3,4,5,6,7]
4402 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4403 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
4404 ; AVX2-SLOW-NEXT: # ymm11 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
4405 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm12 = mem[2,3,2,3,6,7,6,7]
4406 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1,2],ymm12[3,4],ymm11[5,6],ymm12[7]
4407 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
4408 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0],ymm3[1,2,3,4],ymm11[5,6,7]
4409 ; AVX2-SLOW-NEXT: vbroadcastss 80(%rdx), %ymm11
4410 ; AVX2-SLOW-NEXT: vmovaps %ymm15, %ymm1
4411 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm12 = ymm15[3,1,2,0,7,5,6,4]
4412 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3,4,5],ymm11[6],ymm12[7]
4413 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4414 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4415 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm12 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
4416 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
4417 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4418 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm12 # 16-byte Folded Reload
4419 ; AVX2-SLOW-NEXT: # xmm12 = xmm2[3,3],mem[3,3]
4420 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
4421 ; AVX2-SLOW-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
4422 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm12[1,2,3],ymm11[4,5,6,7]
4423 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
4424 ; AVX2-SLOW-NEXT: # ymm12 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
4425 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm13 = ymm0[2],ymm15[2],ymm0[3],ymm15[3],ymm0[6],ymm15[6],ymm0[7],ymm15[7]
4426 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[3,3,3,3]
4427 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[3,3,3,3]
4428 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
4429 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4430 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
4431 ; AVX2-SLOW-NEXT: # ymm13 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
4432 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm14 = mem[2,3,2,3,6,7,6,7]
4433 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1,2],ymm14[3,4],ymm13[5,6],ymm14[7]
4434 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,2,3]
4435 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0],ymm12[1,2,3,4],ymm13[5,6,7]
4436 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
4437 ; AVX2-SLOW-NEXT: vmovaps %ymm12, 640(%rax)
4438 ; AVX2-SLOW-NEXT: vmovaps %ymm11, 544(%rax)
4439 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 416(%rax)
4440 ; AVX2-SLOW-NEXT: vmovaps %ymm9, 320(%rax)
4441 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 192(%rax)
4442 ; AVX2-SLOW-NEXT: vmovaps %ymm7, 96(%rax)
4443 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4444 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 608(%rax)
4445 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 576(%rax)
4446 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4447 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 384(%rax)
4448 ; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
4449 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 352(%rax)
4450 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4451 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 160(%rax)
4452 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4453 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rax)
4454 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4455 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 864(%rax)
4456 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4457 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 832(%rax)
4458 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4459 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 768(%rax)
4460 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4461 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 736(%rax)
4462 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4463 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 704(%rax)
4464 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4465 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 672(%rax)
4466 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4467 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 512(%rax)
4468 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4469 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 480(%rax)
4470 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4471 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 448(%rax)
4472 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4473 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 288(%rax)
4474 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4475 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rax)
4476 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4477 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax)
4478 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4479 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rax)
4480 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4481 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
4482 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4483 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
4484 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4485 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 800(%rax)
4486 ; AVX2-SLOW-NEXT: addq $1320, %rsp # imm = 0x528
4487 ; AVX2-SLOW-NEXT: vzeroupper
4488 ; AVX2-SLOW-NEXT: retq
4489 ;
4490 ; AVX2-FAST-LABEL: store_i32_stride7_vf32:
4491 ; AVX2-FAST: # %bb.0:
4492 ; AVX2-FAST-NEXT: subq $1400, %rsp # imm = 0x578
4493 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
4494 ; AVX2-FAST-NEXT: vmovaps (%rax), %xmm0
4495 ; AVX2-FAST-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
4496 ; AVX2-FAST-NEXT: vmovaps 32(%rax), %xmm2
4497 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4498 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4499 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm3
4500 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4501 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %xmm4
4502 ; AVX2-FAST-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4503 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm1
4504 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4505 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %xmm5
4506 ; AVX2-FAST-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4507 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
4508 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
4509 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
4510 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
4511 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm6
4512 ; AVX2-FAST-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4513 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %xmm3
4514 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4515 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm1
4516 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4517 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1],xmm6[1],zero
4518 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm9
4519 ; AVX2-FAST-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4520 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %xmm7
4521 ; AVX2-FAST-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4522 ; AVX2-FAST-NEXT: vmovaps (%rsi), %xmm13
4523 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %xmm8
4524 ; AVX2-FAST-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4525 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm6 = xmm13[1,1,2,2]
4526 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm9[2],xmm6[3]
4527 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
4528 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0],ymm1[1,2],ymm6[3,4,5,6,7]
4529 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4530 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4531 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
4532 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1,1,1]
4533 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
4534 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
4535 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
4536 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm8[1,1,2,2]
4537 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2],xmm1[3]
4538 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4539 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %xmm2
4540 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4541 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm7 = zero,xmm2[1],xmm3[1],zero
4542 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1,2],ymm1[3,4,5,6,7]
4543 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4544 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4545 ; AVX2-FAST-NEXT: vmovaps 64(%r8), %xmm1
4546 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4547 ; AVX2-FAST-NEXT: vmovaps 64(%r9), %xmm0
4548 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4549 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
4550 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
4551 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
4552 ; AVX2-FAST-NEXT: vmovaps 64(%rax), %xmm1
4553 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4554 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4555 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4556 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %xmm2
4557 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4558 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %xmm1
4559 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4560 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
4561 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
4562 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4563 ; AVX2-FAST-NEXT: vmovaps 64(%rcx), %xmm2
4564 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4565 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %xmm3
4566 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4567 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm7 = zero,xmm3[1],xmm2[1],zero
4568 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1,2],ymm1[3,4,5,6,7]
4569 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4570 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4571 ; AVX2-FAST-NEXT: vmovaps 96(%r8), %xmm1
4572 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4573 ; AVX2-FAST-NEXT: vmovaps 96(%r9), %xmm2
4574 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm2[1,1,1,1]
4575 ; AVX2-FAST-NEXT: vmovaps %xmm2, %xmm12
4576 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4577 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
4578 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
4579 ; AVX2-FAST-NEXT: vmovaps 96(%rax), %xmm1
4580 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4581 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4582 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4583 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %xmm2
4584 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4585 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %xmm1
4586 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4587 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
4588 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
4589 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
4590 ; AVX2-FAST-NEXT: vmovaps 96(%rcx), %xmm2
4591 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4592 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %xmm3
4593 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4594 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm7 = zero,xmm3[1],xmm2[1],zero
4595 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1,2],ymm1[3,4,5,6,7]
4596 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
4597 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4598 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
4599 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4600 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm10
4601 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm10[2],ymm0[3],ymm10[3],ymm0[6],ymm10[6],ymm0[7],ymm10[7]
4602 ; AVX2-FAST-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4603 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4604 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm9
4605 ; AVX2-FAST-NEXT: vmovaps (%rcx), %ymm8
4606 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[6],ymm8[6],ymm9[7],ymm8[7]
4607 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4608 ; AVX2-FAST-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4609 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4610 ; AVX2-FAST-NEXT: vmovaps (%r8), %ymm6
4611 ; AVX2-FAST-NEXT: vmovaps (%r9), %ymm5
4612 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm5[1,1,2,2,5,5,6,6]
4613 ; AVX2-FAST-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4614 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm6[2],ymm1[3,4,5],ymm6[6],ymm1[7]
4615 ; AVX2-FAST-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4616 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4617 ; AVX2-FAST-NEXT: vmovaps 16(%rax), %xmm11
4618 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4,5,6,7]
4619 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4620 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4621 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm1
4622 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4623 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %ymm0
4624 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4625 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
4626 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4627 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm1
4628 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4629 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %ymm2
4630 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4631 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
4632 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4633 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %ymm2
4634 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4635 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %ymm1
4636 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4637 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4638 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4639 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4640 ; AVX2-FAST-NEXT: vmovaps 48(%rax), %xmm15
4641 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
4642 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4643 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4644 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm0
4645 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4646 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %ymm1
4647 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4648 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
4649 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
4650 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %ymm1
4651 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4652 ; AVX2-FAST-NEXT: vmovaps 64(%rcx), %ymm2
4653 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4654 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
4655 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4656 ; AVX2-FAST-NEXT: vmovaps 64(%r8), %ymm2
4657 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4658 ; AVX2-FAST-NEXT: vmovaps 64(%r9), %ymm1
4659 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4660 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
4661 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
4662 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
4663 ; AVX2-FAST-NEXT: vmovaps 80(%rax), %xmm15
4664 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
4665 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
4666 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4667 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm3
4668 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %ymm2
4669 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm2[1,1,1,1,5,5,5,5]
4670 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0],ymm3[1],ymm15[2,3,4],ymm3[5],ymm15[6,7]
4671 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,2]
4672 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %ymm4
4673 ; AVX2-FAST-NEXT: vmovaps 96(%rcx), %ymm1
4674 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm14 = ymm4[1,1],ymm1[1,1],ymm4[5,5],ymm1[5,5]
4675 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4],ymm14[5,6],ymm15[7]
4676 ; AVX2-FAST-NEXT: vbroadcastsd 112(%r8), %ymm15
4677 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1,2,3,4,5,6],ymm15[7]
4678 ; AVX2-FAST-NEXT: vbroadcastss 112(%r9), %xmm15
4679 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6,7]
4680 ; AVX2-FAST-NEXT: vbroadcastss 112(%rax), %ymm15
4681 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm15[2],ymm14[3,4,5,6,7]
4682 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4683 ; AVX2-FAST-NEXT: vbroadcastss 112(%rdx), %ymm14
4684 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm1[3,1,2,0,7,5,6,4]
4685 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6],ymm15[7]
4686 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm15 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
4687 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
4688 ; AVX2-FAST-NEXT: vbroadcastss 108(%r8), %ymm15
4689 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6,7]
4690 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm15 = xmm12[2,2,3,3]
4691 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
4692 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm15 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
4693 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,2]
4694 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
4695 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4696 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3,4,5],mem[6,7]
4697 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm11 = [5,6,5,6,5,6,5,6]
4698 ; AVX2-FAST-NEXT: vpermps 96(%r9), %ymm11, %ymm11
4699 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm15[1,2,3,4,5,6],ymm11[7]
4700 ; AVX2-FAST-NEXT: vmovaps 96(%rax), %ymm15
4701 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm12 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
4702 ; AVX2-FAST-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4703 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
4704 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0],ymm0[1],ymm11[2,3,4],ymm0[5],ymm11[6,7]
4705 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4706 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[6],ymm4[6],ymm1[7],ymm4[7]
4707 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
4708 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
4709 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
4710 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
4711 ; AVX2-FAST-NEXT: vbroadcastss 124(%r8), %ymm1
4712 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4713 ; AVX2-FAST-NEXT: vbroadcastss 124(%r9), %ymm1
4714 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
4715 ; AVX2-FAST-NEXT: vbroadcastsd 120(%rax), %ymm1
4716 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
4717 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4718 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
4719 ; AVX2-FAST-NEXT: vbroadcastss %xmm14, %xmm0
4720 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
4721 ; AVX2-FAST-NEXT: vbroadcastss %xmm12, %xmm1
4722 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
4723 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4724 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
4725 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm7 = [0,1,2,2,0,1,2,2]
4726 ; AVX2-FAST-NEXT: # ymm7 = mem[0,1,0,1]
4727 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm7, %ymm2
4728 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
4729 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4730 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
4731 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm11[0],xmm4[1],xmm11[1]
4732 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
4733 ; AVX2-FAST-NEXT: vbroadcastsd (%rsp), %ymm3 # 16-byte Folded Reload
4734 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
4735 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6],ymm1[7]
4736 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4737 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm15[3,3],xmm13[3,3]
4738 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm2 = xmm12[2],xmm14[2],xmm12[3],xmm14[3]
4739 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4740 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm7, %ymm2
4741 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
4742 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm11[2,2,2,2]
4743 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[3]
4744 ; AVX2-FAST-NEXT: vbroadcastsd 8(%rax), %ymm3
4745 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
4746 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2,3,4],ymm1[5,6,7]
4747 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4748 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm10[1,1,1,1,5,5,5,5]
4749 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4750 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm10[1],ymm1[2,3,4],ymm10[5],ymm1[6,7]
4751 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
4752 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm9[1,1],ymm8[1,1],ymm9[5,5],ymm8[5,5]
4753 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6],ymm1[7]
4754 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm5[0,0,0,0,4,4,4,4]
4755 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm6[0,1,0,1,4,5,4,5]
4756 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
4757 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
4758 ; AVX2-FAST-NEXT: vbroadcastsd 16(%rax), %ymm3
4759 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
4760 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
4761 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4762 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4763 ; AVX2-FAST-NEXT: vbroadcastss %xmm0, %xmm1
4764 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4765 ; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm2
4766 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
4767 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4768 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4769 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
4770 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm7, %ymm2
4771 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
4772 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
4773 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4774 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
4775 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
4776 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 16-byte Folded Reload
4777 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
4778 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6],ymm1[7]
4779 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4780 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[3,3],xmm5[3,3]
4781 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm2 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4782 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
4783 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm7, %ymm2
4784 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
4785 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm8[2,2,2,2]
4786 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm9[0,1,2],xmm2[3]
4787 ; AVX2-FAST-NEXT: vbroadcastsd 40(%rax), %ymm3
4788 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
4789 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2,3,4],ymm1[5,6,7]
4790 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4791 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
4792 ; AVX2-FAST-NEXT: # ymm1 = mem[1,1,1,1,5,5,5,5]
4793 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4794 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm12[1],ymm1[2,3,4],ymm12[5],ymm1[6,7]
4795 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
4796 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4797 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4798 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm3[1,1],ymm11[1,1],ymm3[5,5],ymm11[5,5]
4799 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6],ymm1[7]
4800 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
4801 ; AVX2-FAST-NEXT: # ymm3 = mem[0,0,0,0,4,4,4,4]
4802 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
4803 ; AVX2-FAST-NEXT: # ymm4 = mem[0,1,0,1,4,5,4,5]
4804 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
4805 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3]
4806 ; AVX2-FAST-NEXT: vbroadcastsd 48(%rax), %ymm4
4807 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
4808 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm1[3,4,5,6],ymm3[7]
4809 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4810 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
4811 ; AVX2-FAST-NEXT: vbroadcastss %xmm8, %xmm3
4812 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
4813 ; AVX2-FAST-NEXT: vbroadcastss %xmm9, %xmm4
4814 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
4815 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4816 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4817 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm4 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4818 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm7, %ymm4
4819 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5,6,7]
4820 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4821 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
4822 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm4 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
4823 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
4824 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 16-byte Folded Reload
4825 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
4826 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6],ymm3[7]
4827 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4828 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm4 = xmm2[3,3],xmm0[3,3]
4829 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm5 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
4830 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
4831 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm7, %ymm5
4832 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6],ymm5[7]
4833 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm5 = xmm1[2,2,2,2]
4834 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3]
4835 ; AVX2-FAST-NEXT: vbroadcastsd 72(%rax), %ymm6
4836 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
4837 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm5[2,3,4],ymm4[5,6,7]
4838 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4839 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4840 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm15[1,1,1,1,5,5,5,5]
4841 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4842 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm14[1],ymm5[2,3,4],ymm14[5],ymm5[6,7]
4843 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,2]
4844 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
4845 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4846 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm6 = ymm0[1,1],ymm13[1,1],ymm0[5,5],ymm13[5,5]
4847 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6],ymm5[7]
4848 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
4849 ; AVX2-FAST-NEXT: # ymm6 = mem[0,0,0,0,4,4,4,4]
4850 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
4851 ; AVX2-FAST-NEXT: # ymm8 = mem[0,1,0,1,4,5,4,5]
4852 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0],ymm6[1],ymm8[2,3,4],ymm6[5],ymm8[6,7]
4853 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3]
4854 ; AVX2-FAST-NEXT: vbroadcastsd 80(%rax), %ymm8
4855 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3],ymm6[4,5,6,7]
4856 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm5[3,4,5,6],ymm6[7]
4857 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4858 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
4859 ; AVX2-FAST-NEXT: vbroadcastss %xmm4, %xmm6
4860 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
4861 ; AVX2-FAST-NEXT: vbroadcastss %xmm3, %xmm8
4862 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
4863 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4864 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4865 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm8 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
4866 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm7, %ymm8
4867 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7]
4868 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4869 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
4870 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm8 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
4871 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
4872 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 16-byte Folded Reload
4873 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm8[0],ymm9[0],ymm8[2],ymm9[2]
4874 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm6[0,1,2,3],ymm8[4,5,6],ymm6[7]
4875 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm6 = xmm0[3,3],xmm2[3,3]
4876 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm9 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
4877 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm7, %ymm0
4878 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
4879 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5,6],ymm0[7]
4880 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm6 = xmm5[2,2,2,2]
4881 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm6 = xmm1[0,1,2],xmm6[3]
4882 ; AVX2-FAST-NEXT: vbroadcastsd 104(%rax), %ymm9
4883 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm9[4,5,6,7]
4884 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm6[2,3,4],ymm0[5,6,7]
4885 ; AVX2-FAST-NEXT: vbroadcastss 16(%rdx), %ymm6
4886 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4887 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm0[3,1,2,0,7,5,6,4]
4888 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3,4,5],ymm6[6],ymm9[7]
4889 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4890 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm9 = ymm10[0],ymm1[0],ymm10[1],ymm1[1],ymm10[4],ymm1[4],ymm10[5],ymm1[5]
4891 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm9[4,5],ymm6[6,7]
4892 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
4893 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm9 # 16-byte Folded Reload
4894 ; AVX2-FAST-NEXT: # xmm9 = xmm3[3,3],mem[3,3]
4895 ; AVX2-FAST-NEXT: vblendps $8, (%rsp), %xmm9, %xmm9 # 16-byte Folded Reload
4896 ; AVX2-FAST-NEXT: # xmm9 = xmm9[0,1,2],mem[3]
4897 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm6[0],ymm9[1,2,3],ymm6[4,5,6,7]
4898 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
4899 ; AVX2-FAST-NEXT: # ymm6 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
4900 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm7 = ymm1[2],ymm10[2],ymm1[3],ymm10[3],ymm1[6],ymm10[6],ymm1[7],ymm10[7]
4901 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[3,3,3,3]
4902 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[3,3,3,3]
4903 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7]
4904 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4905 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
4906 ; AVX2-FAST-NEXT: # ymm7 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
4907 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm10 = mem[2,3,2,3,6,7,6,7]
4908 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0],ymm7[1,2],ymm10[3,4],ymm7[5,6],ymm10[7]
4909 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3]
4910 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1,2,3,4],ymm7[5,6,7]
4911 ; AVX2-FAST-NEXT: vbroadcastss 48(%rdx), %ymm7
4912 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm11[3,1,2,0,7,5,6,4]
4913 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1,2,3,4,5],ymm7[6],ymm10[7]
4914 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4915 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm10 = ymm12[0],ymm0[0],ymm12[1],ymm0[1],ymm12[4],ymm0[4],ymm12[5],ymm0[5]
4916 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm10[4,5],ymm7[6,7]
4917 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
4918 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm10 # 16-byte Folded Reload
4919 ; AVX2-FAST-NEXT: # xmm10 = xmm3[3,3],mem[3,3]
4920 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
4921 ; AVX2-FAST-NEXT: # xmm10 = xmm10[0,1,2],mem[3]
4922 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm10[1,2,3],ymm7[4,5,6,7]
4923 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
4924 ; AVX2-FAST-NEXT: # ymm10 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
4925 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm11 = ymm0[2],ymm12[2],ymm0[3],ymm12[3],ymm0[6],ymm12[6],ymm0[7],ymm12[7]
4926 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[3,3,3,3]
4927 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[3,3,3,3]
4928 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3,4,5,6,7]
4929 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4930 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
4931 ; AVX2-FAST-NEXT: # ymm11 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
4932 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm12 = mem[2,3,2,3,6,7,6,7]
4933 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1,2],ymm12[3,4],ymm11[5,6],ymm12[7]
4934 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
4935 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1,2,3,4],ymm11[5,6,7]
4936 ; AVX2-FAST-NEXT: vbroadcastss 80(%rdx), %ymm11
4937 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm12 = ymm13[3,1,2,0,7,5,6,4]
4938 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3,4,5],ymm11[6],ymm12[7]
4939 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm12 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[4],ymm15[4],ymm14[5],ymm15[5]
4940 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
4941 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4942 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm12 # 16-byte Folded Reload
4943 ; AVX2-FAST-NEXT: # xmm12 = xmm1[3,3],mem[3,3]
4944 ; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
4945 ; AVX2-FAST-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
4946 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm12[1,2,3],ymm11[4,5,6,7]
4947 ; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm12 # 32-byte Folded Reload
4948 ; AVX2-FAST-NEXT: # ymm12 = ymm13[2],mem[2],ymm13[3],mem[3],ymm13[6],mem[6],ymm13[7],mem[7]
4949 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm13 = ymm15[2],ymm14[2],ymm15[3],ymm14[3],ymm15[6],ymm14[6],ymm15[7],ymm14[7]
4950 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[3,3,3,3]
4951 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[3,3,3,3]
4952 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
4953 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4954 ; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
4955 ; AVX2-FAST-NEXT: # ymm13 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
4956 ; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm14 = mem[2,3,2,3,6,7,6,7]
4957 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1,2],ymm14[3,4],ymm13[5,6],ymm14[7]
4958 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,2,3]
4959 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0],ymm12[1,2,3,4],ymm13[5,6,7]
4960 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
4961 ; AVX2-FAST-NEXT: vmovaps %ymm12, 640(%rax)
4962 ; AVX2-FAST-NEXT: vmovaps %ymm11, 544(%rax)
4963 ; AVX2-FAST-NEXT: vmovaps %ymm10, 416(%rax)
4964 ; AVX2-FAST-NEXT: vmovaps %ymm7, 320(%rax)
4965 ; AVX2-FAST-NEXT: vmovaps %ymm6, 192(%rax)
4966 ; AVX2-FAST-NEXT: vmovaps %ymm9, 96(%rax)
4967 ; AVX2-FAST-NEXT: vmovaps %ymm5, 736(%rax)
4968 ; AVX2-FAST-NEXT: vmovaps %ymm8, 672(%rax)
4969 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4970 ; AVX2-FAST-NEXT: vmovaps %ymm0, 608(%rax)
4971 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4972 ; AVX2-FAST-NEXT: vmovaps %ymm0, 576(%rax)
4973 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4974 ; AVX2-FAST-NEXT: vmovaps %ymm0, 512(%rax)
4975 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4976 ; AVX2-FAST-NEXT: vmovaps %ymm0, 448(%rax)
4977 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4978 ; AVX2-FAST-NEXT: vmovaps %ymm0, 384(%rax)
4979 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4980 ; AVX2-FAST-NEXT: vmovaps %ymm0, 352(%rax)
4981 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4982 ; AVX2-FAST-NEXT: vmovaps %ymm0, 288(%rax)
4983 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4984 ; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rax)
4985 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4986 ; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rax)
4987 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4988 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rax)
4989 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4990 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
4991 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4992 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
4993 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4994 ; AVX2-FAST-NEXT: vmovaps %ymm0, 864(%rax)
4995 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4996 ; AVX2-FAST-NEXT: vmovaps %ymm0, 832(%rax)
4997 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4998 ; AVX2-FAST-NEXT: vmovaps %ymm0, 768(%rax)
4999 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5000 ; AVX2-FAST-NEXT: vmovaps %ymm0, 704(%rax)
5001 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5002 ; AVX2-FAST-NEXT: vmovaps %ymm0, 480(%rax)
5003 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5004 ; AVX2-FAST-NEXT: vmovaps %ymm0, 256(%rax)
5005 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5006 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
5007 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5008 ; AVX2-FAST-NEXT: vmovaps %ymm0, 800(%rax)
5009 ; AVX2-FAST-NEXT: addq $1400, %rsp # imm = 0x578
5010 ; AVX2-FAST-NEXT: vzeroupper
5011 ; AVX2-FAST-NEXT: retq
5012 ;
5013 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf32:
5014 ; AVX2-FAST-PERLANE: # %bb.0:
5015 ; AVX2-FAST-PERLANE-NEXT: subq $1320, %rsp # imm = 0x528
5016 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
5017 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rax), %xmm0
5018 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5019 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rax), %xmm3
5020 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5021 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5022 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm14
5023 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %xmm6
5024 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5025 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm8
5026 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %xmm9
5027 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5028 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm8[1,1,1,1]
5029 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
5030 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
5031 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
5032 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm10
5033 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %xmm11
5034 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5035 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm7
5036 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm7[1],xmm10[1],zero
5037 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm5
5038 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %xmm13
5039 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5040 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %xmm4
5041 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %xmm12
5042 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5043 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm4[1,1,2,2]
5044 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3]
5045 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
5046 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
5047 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
5048 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5049 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
5050 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm9[1,1,1,1]
5051 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3]
5052 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
5053 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
5054 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm12[1,1,2,2]
5055 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2],xmm1[3]
5056 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
5057 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %xmm6
5058 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm6[1],xmm11[1],zero
5059 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
5060 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
5061 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5062 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r8), %xmm1
5063 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5064 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r9), %xmm0
5065 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5066 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
5067 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
5068 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
5069 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rax), %xmm1
5070 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5071 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5072 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5073 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %xmm2
5074 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5075 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %xmm1
5076 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5077 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
5078 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
5079 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
5080 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rcx), %xmm13
5081 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %xmm12
5082 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm12[1],xmm13[1],zero
5083 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
5084 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
5085 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5086 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r8), %xmm1
5087 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5088 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r9), %xmm11
5089 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm11[1,1,1,1]
5090 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
5091 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
5092 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rax), %xmm1
5093 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5094 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5095 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5096 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %xmm2
5097 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5098 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %xmm1
5099 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5100 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
5101 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
5102 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
5103 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rcx), %xmm3
5104 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, (%rsp) # 16-byte Spill
5105 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %xmm2
5106 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5107 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
5108 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
5109 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
5110 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5111 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
5112 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5113 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm1
5114 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5115 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
5116 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
5117 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm2
5118 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5119 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %ymm1
5120 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5121 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
5122 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
5123 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm2
5124 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5125 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %ymm1
5126 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5127 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
5128 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
5129 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
5130 ; AVX2-FAST-PERLANE-NEXT: vmovaps 16(%rax), %xmm2
5131 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
5132 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5133 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5134 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm1
5135 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5136 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm0
5137 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5138 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
5139 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
5140 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm1
5141 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5142 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %ymm2
5143 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5144 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
5145 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
5146 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %ymm2
5147 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5148 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %ymm1
5149 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5150 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
5151 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
5152 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
5153 ; AVX2-FAST-PERLANE-NEXT: vmovaps 48(%rax), %xmm2
5154 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
5155 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5156 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5157 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm1
5158 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5159 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %ymm0
5160 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5161 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
5162 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
5163 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %ymm1
5164 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5165 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rcx), %ymm2
5166 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5167 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
5168 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
5169 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r8), %ymm2
5170 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5171 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r9), %ymm1
5172 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5173 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
5174 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
5175 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
5176 ; AVX2-FAST-PERLANE-NEXT: vmovaps 80(%rax), %xmm2
5177 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
5178 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5179 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5180 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm3
5181 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %ymm2
5182 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,1,1,1,5,5,5,5]
5183 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
5184 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
5185 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %ymm9
5186 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5187 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rcx), %ymm1
5188 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm9[1,1],ymm1[1,1],ymm9[5,5],ymm1[5,5]
5189 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm15[5,6],ymm0[7]
5190 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 112(%r8), %ymm15
5191 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0],ymm0[1,2,3,4,5,6],ymm15[7]
5192 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 112(%r9), %xmm15
5193 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3,4,5,6,7]
5194 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 112(%rax), %ymm15
5195 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2],ymm0[3,4,5,6,7]
5196 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5197 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm10, %xmm0
5198 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm7, %xmm15
5199 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
5200 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm15 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
5201 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,2,2]
5202 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
5203 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3],ymm15[4,5,6,7]
5204 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5205 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm14, %xmm9
5206 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5207 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm8[0],xmm14[1],xmm8[1]
5208 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
5209 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 16-byte Folded Reload
5210 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
5211 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6],ymm0[7]
5212 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5213 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm5[3,3],xmm4[3,3]
5214 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm4 = xmm7[2],xmm10[2],xmm7[3],xmm10[3]
5215 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5216 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
5217 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
5218 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6],ymm4[7]
5219 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm8[2,2,2,2]
5220 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm4 = xmm9[0,1,2],xmm4[3]
5221 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 8(%rax), %ymm5
5222 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
5223 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3,4],ymm0[5,6,7]
5224 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5225 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5226 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm8, %xmm0
5227 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm6, %xmm4
5228 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
5229 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5230 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5231 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm4 = xmm10[0],xmm14[0],xmm10[1],xmm14[1]
5232 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
5233 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
5234 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
5235 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5236 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
5237 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
5238 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
5239 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 16-byte Folded Reload
5240 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
5241 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6],ymm0[7]
5242 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5243 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm10[3,3],xmm14[3,3]
5244 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm4 = xmm6[2],xmm8[2],xmm6[3],xmm8[3]
5245 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5246 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
5247 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
5248 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6],ymm4[7]
5249 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm9[2,2,2,2]
5250 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm4 = xmm7[0,1,2],xmm4[3]
5251 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 40(%rax), %ymm5
5252 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
5253 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3,4],ymm0[5,6,7]
5254 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5255 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm0
5256 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm12, %xmm4
5257 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
5258 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5259 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
5260 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm4 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
5261 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
5262 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
5263 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
5264 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5265 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5266 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
5267 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
5268 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 16-byte Folded Reload
5269 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
5270 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6],ymm0[7]
5271 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5272 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm9[3,3],xmm8[3,3]
5273 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm4 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
5274 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5275 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
5276 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
5277 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6],ymm4[7]
5278 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm6[2,2,2,2]
5279 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm4 = xmm7[0,1,2],xmm4[3]
5280 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 72(%rax), %ymm5
5281 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
5282 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3,4],ymm0[5,6,7]
5283 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5284 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsp), %xmm9 # 16-byte Reload
5285 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm0
5286 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5287 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm10, %xmm4
5288 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
5289 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5290 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5291 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm4 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
5292 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
5293 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
5294 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
5295 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5296 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm4 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
5297 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
5298 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 16-byte Folded Reload
5299 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
5300 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6],ymm0[7]
5301 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5302 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm8[3,3],xmm7[3,3]
5303 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm4 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
5304 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5305 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,2]
5306 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
5307 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6],ymm4[7]
5308 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm4 = xmm11[2,2,2,2]
5309 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3]
5310 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 104(%rax), %ymm5
5311 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
5312 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3,4],ymm0[5,6,7]
5313 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5314 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 112(%rdx), %ymm0
5315 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm1[3,1,2,0,7,5,6,4]
5316 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6],ymm5[7]
5317 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
5318 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5],ymm0[6,7]
5319 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 108(%r8), %ymm5
5320 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4,5,6,7]
5321 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm11[2,2,3,3]
5322 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5,6,7]
5323 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm5 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
5324 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,2]
5325 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5326 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm6 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
5327 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
5328 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
5329 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm7 = mem[1,2,2,3,5,6,6,7]
5330 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,2,2,2]
5331 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0],ymm5[1,2,3,4,5,6],ymm7[7]
5332 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rax), %ymm7
5333 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm7[3],ymm0[4,5,6,7]
5334 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5335 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm6[2,3]
5336 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3,4],ymm0[5],ymm5[6,7]
5337 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5338 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[6],ymm4[6],ymm1[7],ymm4[7]
5339 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
5340 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
5341 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
5342 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
5343 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 124(%r8), %ymm1
5344 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5345 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 124(%r9), %ymm1
5346 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
5347 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 120(%rax), %ymm1
5348 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
5349 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5350 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
5351 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,1,1,1,5,5,5,5]
5352 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
5353 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
5354 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
5355 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
5356 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
5357 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm9[1,1],ymm4[1,1],ymm9[5,5],ymm4[5,5]
5358 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6],ymm1[7]
5359 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
5360 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm13[0,0,0,0,4,4,4,4]
5361 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
5362 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm11[0,1,0,1,4,5,4,5]
5363 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7]
5364 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
5365 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 16(%rax), %ymm3
5366 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
5367 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5,6],ymm2[7]
5368 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5369 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
5370 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm14[1,1,1,1,5,5,5,5]
5371 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
5372 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3,4],ymm12[5],ymm2[6,7]
5373 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,2]
5374 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
5375 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5376 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm0[1,1],ymm10[1,1],ymm0[5,5],ymm10[5,5]
5377 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6],ymm2[7]
5378 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
5379 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,0,0,0,4,4,4,4]
5380 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
5381 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,1,0,1,4,5,4,5]
5382 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0],ymm3[1],ymm6[2,3,4],ymm3[5],ymm6[6,7]
5383 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3]
5384 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 48(%rax), %ymm6
5385 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2,3],ymm3[4,5,6,7]
5386 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm2[3,4,5,6],ymm3[7]
5387 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
5388 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
5389 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[1,1,1,1,5,5,5,5]
5390 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
5391 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm3[0],mem[1],ymm3[2,3,4],mem[5],ymm3[6,7]
5392 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,2]
5393 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
5394 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5395 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm0[1,1],ymm15[1,1],ymm0[5,5],ymm15[5,5]
5396 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm6[5,6],ymm3[7]
5397 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
5398 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,0,0,0,4,4,4,4]
5399 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
5400 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,1,0,1,4,5,4,5]
5401 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2,3,4],ymm6[5],ymm7[6,7]
5402 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3]
5403 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 80(%rax), %ymm7
5404 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
5405 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm3[3,4,5,6],ymm6[7]
5406 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%rdx), %ymm3
5407 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, %ymm1
5408 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm4[3,1,2,0,7,5,6,4]
5409 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6],ymm7[7]
5410 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm7 = ymm5[0],ymm8[0],ymm5[1],ymm8[1],ymm5[4],ymm8[4],ymm5[5],ymm8[5]
5411 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm7[4,5],ymm3[6,7]
5412 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
5413 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm7 # 16-byte Folded Reload
5414 ; AVX2-FAST-PERLANE-NEXT: # xmm7 = xmm4[3,3],mem[3,3]
5415 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
5416 ; AVX2-FAST-PERLANE-NEXT: # xmm7 = xmm7[0,1,2],mem[3]
5417 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm3[0],ymm7[1,2,3],ymm3[4,5,6,7]
5418 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm3 = ymm1[2],ymm9[2],ymm1[3],ymm9[3],ymm1[6],ymm9[6],ymm1[7],ymm9[7]
5419 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm8 = ymm8[2],ymm5[2],ymm8[3],ymm5[3],ymm8[6],ymm5[6],ymm8[7],ymm5[7]
5420 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[3,3,3,3]
5421 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[3,3,3,3]
5422 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0,1,2],ymm3[3,4,5,6,7]
5423 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm11[3,3],ymm13[3,3],ymm11[7,7],ymm13[7,7]
5424 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm9 = mem[2,3,2,3,6,7,6,7]
5425 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1,2],ymm9[3,4],ymm8[5,6],ymm9[7]
5426 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,2,3]
5427 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm3[1,2,3,4],ymm8[5,6,7]
5428 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 48(%rdx), %ymm3
5429 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm9 = ymm10[3,1,2,0,7,5,6,4]
5430 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm9[0,1,2,3,4,5],ymm3[6],ymm9[7]
5431 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm9 = ymm12[0],ymm14[0],ymm12[1],ymm14[1],ymm12[4],ymm14[4],ymm12[5],ymm14[5]
5432 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm9[4,5],ymm3[6,7]
5433 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5434 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm9 # 16-byte Folded Reload
5435 ; AVX2-FAST-PERLANE-NEXT: # xmm9 = xmm2[3,3],mem[3,3]
5436 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
5437 ; AVX2-FAST-PERLANE-NEXT: # xmm9 = xmm9[0,1,2],mem[3]
5438 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm3[0],ymm9[1,2,3],ymm3[4,5,6,7]
5439 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm3 # 32-byte Folded Reload
5440 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
5441 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm11 = ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[6],ymm12[6],ymm14[7],ymm12[7]
5442 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[3,3,3,3]
5443 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[3,3,3,3]
5444 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1,2],ymm3[3,4,5,6,7]
5445 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5446 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
5447 ; AVX2-FAST-PERLANE-NEXT: # ymm11 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
5448 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm12 = mem[2,3,2,3,6,7,6,7]
5449 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1,2],ymm12[3,4],ymm11[5,6],ymm12[7]
5450 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
5451 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0],ymm3[1,2,3,4],ymm11[5,6,7]
5452 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 80(%rdx), %ymm11
5453 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm15, %ymm1
5454 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm12 = ymm15[3,1,2,0,7,5,6,4]
5455 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3,4,5],ymm11[6],ymm12[7]
5456 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
5457 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5458 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm12 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
5459 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
5460 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5461 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm12 # 16-byte Folded Reload
5462 ; AVX2-FAST-PERLANE-NEXT: # xmm12 = xmm2[3,3],mem[3,3]
5463 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
5464 ; AVX2-FAST-PERLANE-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
5465 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm12[1,2,3],ymm11[4,5,6,7]
5466 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
5467 ; AVX2-FAST-PERLANE-NEXT: # ymm12 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
5468 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm13 = ymm0[2],ymm15[2],ymm0[3],ymm15[3],ymm0[6],ymm15[6],ymm0[7],ymm15[7]
5469 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[3,3,3,3]
5470 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[3,3,3,3]
5471 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
5472 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5473 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
5474 ; AVX2-FAST-PERLANE-NEXT: # ymm13 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
5475 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm14 = mem[2,3,2,3,6,7,6,7]
5476 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1,2],ymm14[3,4],ymm13[5,6],ymm14[7]
5477 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,2,3]
5478 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0],ymm12[1,2,3,4],ymm13[5,6,7]
5479 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
5480 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm12, 640(%rax)
5481 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, 544(%rax)
5482 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 416(%rax)
5483 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 320(%rax)
5484 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 192(%rax)
5485 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, 96(%rax)
5486 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
5487 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 608(%rax)
5488 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 576(%rax)
5489 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
5490 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 384(%rax)
5491 ; AVX2-FAST-PERLANE-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
5492 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 352(%rax)
5493 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
5494 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 160(%rax)
5495 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5496 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%rax)
5497 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5498 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 864(%rax)
5499 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5500 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 832(%rax)
5501 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5502 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 768(%rax)
5503 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5504 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 736(%rax)
5505 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5506 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 704(%rax)
5507 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5508 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 672(%rax)
5509 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5510 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 512(%rax)
5511 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5512 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 480(%rax)
5513 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5514 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 448(%rax)
5515 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5516 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 288(%rax)
5517 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5518 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rax)
5519 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5520 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax)
5521 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5522 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rax)
5523 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5524 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
5525 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5526 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
5527 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5528 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 800(%rax)
5529 ; AVX2-FAST-PERLANE-NEXT: addq $1320, %rsp # imm = 0x528
5530 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
5531 ; AVX2-FAST-PERLANE-NEXT: retq
5532 ;
5533 ; AVX512F-LABEL: store_i32_stride7_vf32:
5534 ; AVX512F: # %bb.0:
5535 ; AVX512F-NEXT: pushq %rax
5536 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
5537 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm15
5538 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm18
5539 ; AVX512F-NEXT: vmovdqa64 (%rsi), %zmm14
5540 ; AVX512F-NEXT: vmovdqa64 64(%rsi), %zmm10
5541 ; AVX512F-NEXT: vmovdqa64 (%rdx), %zmm1
5542 ; AVX512F-NEXT: vmovdqa64 64(%rdx), %zmm21
5543 ; AVX512F-NEXT: vmovdqa64 (%rcx), %zmm12
5544 ; AVX512F-NEXT: vmovdqa64 64(%rcx), %zmm11
5545 ; AVX512F-NEXT: vmovdqa64 (%r8), %zmm3
5546 ; AVX512F-NEXT: vmovdqa64 64(%r8), %zmm28
5547 ; AVX512F-NEXT: vmovdqa64 (%r9), %zmm8
5548 ; AVX512F-NEXT: vmovdqa64 64(%r9), %zmm20
5549 ; AVX512F-NEXT: vmovdqa64 (%rax), %zmm9
5550 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm30 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
5551 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm2
5552 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm30, %zmm2
5553 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
5554 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm4
5555 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm7, %zmm4
5556 ; AVX512F-NEXT: movw $6192, %cx # imm = 0x1830
5557 ; AVX512F-NEXT: kmovw %ecx, %k1
5558 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm4 {%k1}
5559 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,3,4,5,30,u,u,9,10,11,12,31,u,u>
5560 ; AVX512F-NEXT: vpermi2d %zmm28, %zmm4, %zmm0
5561 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5562 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm22 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
5563 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm2
5564 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm22, %zmm2
5565 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
5566 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm4
5567 ; AVX512F-NEXT: vpermt2d %zmm21, %zmm13, %zmm4
5568 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm4 {%k1}
5569 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,27,u,u,4,5,6,7,28,u,u,11,12,13,14,29>
5570 ; AVX512F-NEXT: vpermi2d %zmm28, %zmm4, %zmm0
5571 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5572 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
5573 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm16
5574 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm2, %zmm16
5575 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
5576 ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm6
5577 ; AVX512F-NEXT: vpermt2d %zmm14, %zmm0, %zmm6
5578 ; AVX512F-NEXT: movw $1548, %cx # imm = 0x60C
5579 ; AVX512F-NEXT: kmovw %ecx, %k2
5580 ; AVX512F-NEXT: vmovdqa32 %zmm16, %zmm6 {%k2}
5581 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm27 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
5582 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm31
5583 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm27, %zmm31
5584 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm25
5585 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm17 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
5586 ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm16
5587 ; AVX512F-NEXT: vpermt2d %zmm18, %zmm17, %zmm16
5588 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm24
5589 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm4
5590 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm29
5591 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm26
5592 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm23
5593 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm2, %zmm21
5594 ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm5
5595 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm0, %zmm18
5596 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
5597 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm2, %zmm31
5598 ; AVX512F-NEXT: movw $14448, %cx # imm = 0x3870
5599 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm18 {%k2}
5600 ; AVX512F-NEXT: kmovw %ecx, %k2
5601 ; AVX512F-NEXT: vmovdqa32 %zmm31, %zmm6 {%k2}
5602 ; AVX512F-NEXT: vmovdqa64 %zmm28, %zmm31
5603 ; AVX512F-NEXT: vmovdqa64 %zmm28, %zmm19
5604 ; AVX512F-NEXT: vmovdqa64 %zmm28, %zmm21
5605 ; AVX512F-NEXT: vmovdqa64 %zmm28, %zmm0
5606 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm27, %zmm28
5607 ; AVX512F-NEXT: vmovdqa64 64(%rax), %zmm27
5608 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm2, %zmm28
5609 ; AVX512F-NEXT: vmovdqa32 %zmm28, %zmm18 {%k2}
5610 ; AVX512F-NEXT: vpermi2d %zmm12, %zmm1, %zmm30
5611 ; AVX512F-NEXT: vpermi2d %zmm14, %zmm15, %zmm7
5612 ; AVX512F-NEXT: vmovdqa32 %zmm30, %zmm7 {%k1}
5613 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
5614 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm8, %zmm2
5615 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm28 = <0,29,u,u,u,u,6,7,30,u,u,u,u,13,14,31>
5616 ; AVX512F-NEXT: vpermi2d %zmm9, %zmm2, %zmm28
5617 ; AVX512F-NEXT: movw $-7741, %ax # imm = 0xE1C3
5618 ; AVX512F-NEXT: kmovw %eax, %k2
5619 ; AVX512F-NEXT: vmovdqa32 %zmm28, %zmm7 {%k2}
5620 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
5621 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm2, %zmm25
5622 ; AVX512F-NEXT: movw $-31994, %ax # imm = 0x8306
5623 ; AVX512F-NEXT: kmovw %eax, %k2
5624 ; AVX512F-NEXT: vmovdqa32 %zmm25, %zmm16 {%k2}
5625 ; AVX512F-NEXT: vpermi2d %zmm14, %zmm15, %zmm22
5626 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm12, %zmm13
5627 ; AVX512F-NEXT: vmovdqa32 %zmm22, %zmm13 {%k1}
5628 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm22 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
5629 ; AVX512F-NEXT: vpermi2d %zmm8, %zmm3, %zmm22
5630 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm25 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
5631 ; AVX512F-NEXT: vpermi2d %zmm9, %zmm22, %zmm25
5632 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm22 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
5633 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm22, %zmm31
5634 ; AVX512F-NEXT: movw $-30962, %ax # imm = 0x870E
5635 ; AVX512F-NEXT: kmovw %eax, %k1
5636 ; AVX512F-NEXT: vmovdqa32 %zmm25, %zmm13 {%k1}
5637 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm25 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
5638 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm25, %zmm31
5639 ; AVX512F-NEXT: movw $7224, %ax # imm = 0x1C38
5640 ; AVX512F-NEXT: kmovw %eax, %k1
5641 ; AVX512F-NEXT: vmovdqa32 %zmm31, %zmm16 {%k1}
5642 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm28 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
5643 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm28, %zmm24
5644 ; AVX512F-NEXT: vpermi2d %zmm12, %zmm1, %zmm2
5645 ; AVX512F-NEXT: vpermi2d %zmm15, %zmm14, %zmm17
5646 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm17 {%k2}
5647 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
5648 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm2, %zmm4
5649 ; AVX512F-NEXT: movw $3096, %ax # imm = 0xC18
5650 ; AVX512F-NEXT: kmovw %eax, %k2
5651 ; AVX512F-NEXT: vmovdqa32 %zmm24, %zmm4 {%k2}
5652 ; AVX512F-NEXT: vpermi2d %zmm8, %zmm3, %zmm22
5653 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm25, %zmm22
5654 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm24 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
5655 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm24, %zmm19
5656 ; AVX512F-NEXT: vmovdqa32 %zmm22, %zmm17 {%k1}
5657 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm22 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
5658 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm22, %zmm19
5659 ; AVX512F-NEXT: movw $28897, %ax # imm = 0x70E1
5660 ; AVX512F-NEXT: kmovw %eax, %k3
5661 ; AVX512F-NEXT: vmovdqa32 %zmm19, %zmm4 {%k3}
5662 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm19 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
5663 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm19, %zmm29
5664 ; AVX512F-NEXT: vpermi2d %zmm12, %zmm1, %zmm28
5665 ; AVX512F-NEXT: vpermi2d %zmm14, %zmm15, %zmm2
5666 ; AVX512F-NEXT: vmovdqa32 %zmm28, %zmm2 {%k2}
5667 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm25 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
5668 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm25, %zmm26
5669 ; AVX512F-NEXT: movw $12384, %ax # imm = 0x3060
5670 ; AVX512F-NEXT: kmovw %eax, %k1
5671 ; AVX512F-NEXT: vmovdqa32 %zmm29, %zmm26 {%k1}
5672 ; AVX512F-NEXT: vpermi2d %zmm8, %zmm3, %zmm24
5673 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm22, %zmm24
5674 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm22 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
5675 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm22, %zmm21
5676 ; AVX512F-NEXT: vmovdqa32 %zmm24, %zmm2 {%k3}
5677 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm24 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
5678 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm24, %zmm21
5679 ; AVX512F-NEXT: movw $15480, %ax # imm = 0x3C78
5680 ; AVX512F-NEXT: kmovw %eax, %k2
5681 ; AVX512F-NEXT: vmovdqa32 %zmm26, %zmm21 {%k2}
5682 ; AVX512F-NEXT: vpermi2d %zmm12, %zmm1, %zmm19
5683 ; AVX512F-NEXT: vpermi2d %zmm14, %zmm15, %zmm25
5684 ; AVX512F-NEXT: vmovdqa32 %zmm19, %zmm25 {%k1}
5685 ; AVX512F-NEXT: vpermi2d %zmm8, %zmm3, %zmm22
5686 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm24, %zmm22
5687 ; AVX512F-NEXT: vmovdqa32 %zmm25, %zmm22 {%k2}
5688 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm19 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
5689 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm19, %zmm5
5690 ; AVX512F-NEXT: vpermt2d %zmm14, %zmm19, %zmm15
5691 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
5692 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm10, %zmm23
5693 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm10, %zmm1
5694 ; AVX512F-NEXT: vmovdqa32 %zmm5, %zmm23 {%k1}
5695 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm5 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
5696 ; AVX512F-NEXT: vpermt2d %zmm8, %zmm5, %zmm3
5697 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm5, %zmm0
5698 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm5 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
5699 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm5, %zmm3
5700 ; AVX512F-NEXT: vpermt2d %zmm27, %zmm5, %zmm0
5701 ; AVX512F-NEXT: vmovdqa32 %zmm15, %zmm1 {%k1}
5702 ; AVX512F-NEXT: movw $3612, %ax # imm = 0xE1C
5703 ; AVX512F-NEXT: kmovw %eax, %k1
5704 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
5705 ; AVX512F-NEXT: vmovdqa32 %zmm3, %zmm1 {%k1}
5706 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <29,u,2,3,4,5,6,30,u,9,10,11,12,13,31,u>
5707 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
5708 ; AVX512F-NEXT: vpermi2d %zmm20, %zmm3, %zmm0
5709 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,1,27,u,4,5,6,7,8,28,u,11,12,13,14,15>
5710 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
5711 ; AVX512F-NEXT: vpermi2d %zmm20, %zmm5, %zmm3
5712 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
5713 ; AVX512F-NEXT: vpermi2d %zmm27, %zmm0, %zmm5
5714 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,27,4,5,6,7,8,9,28,11,12,13,14,15]
5715 ; AVX512F-NEXT: vpermi2d %zmm27, %zmm3, %zmm0
5716 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
5717 ; AVX512F-NEXT: vmovdqa64 %zmm1, 64(%rax)
5718 ; AVX512F-NEXT: vmovdqa64 %zmm22, 128(%rax)
5719 ; AVX512F-NEXT: vmovdqa64 %zmm2, 192(%rax)
5720 ; AVX512F-NEXT: vmovdqa64 %zmm17, 256(%rax)
5721 ; AVX512F-NEXT: vmovdqa64 %zmm13, 320(%rax)
5722 ; AVX512F-NEXT: vmovdqa64 %zmm7, 384(%rax)
5723 ; AVX512F-NEXT: vmovdqa64 %zmm18, 448(%rax)
5724 ; AVX512F-NEXT: vmovdqa64 %zmm23, 512(%rax)
5725 ; AVX512F-NEXT: vmovdqa64 %zmm21, 576(%rax)
5726 ; AVX512F-NEXT: vmovdqa64 %zmm4, 640(%rax)
5727 ; AVX512F-NEXT: vmovdqa64 %zmm16, 704(%rax)
5728 ; AVX512F-NEXT: vmovdqa64 %zmm6, (%rax)
5729 ; AVX512F-NEXT: vmovdqa64 %zmm0, 768(%rax)
5730 ; AVX512F-NEXT: vmovdqa64 %zmm5, 832(%rax)
5731 ; AVX512F-NEXT: popq %rax
5732 ; AVX512F-NEXT: vzeroupper
5733 ; AVX512F-NEXT: retq
5734 ;
5735 ; AVX512BW-LABEL: store_i32_stride7_vf32:
5736 ; AVX512BW: # %bb.0:
5737 ; AVX512BW-NEXT: pushq %rax
5738 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
5739 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm15
5740 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm18
5741 ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm14
5742 ; AVX512BW-NEXT: vmovdqa64 64(%rsi), %zmm10
5743 ; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm1
5744 ; AVX512BW-NEXT: vmovdqa64 64(%rdx), %zmm21
5745 ; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm12
5746 ; AVX512BW-NEXT: vmovdqa64 64(%rcx), %zmm11
5747 ; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm3
5748 ; AVX512BW-NEXT: vmovdqa64 64(%r8), %zmm28
5749 ; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm8
5750 ; AVX512BW-NEXT: vmovdqa64 64(%r9), %zmm20
5751 ; AVX512BW-NEXT: vmovdqa64 (%rax), %zmm9
5752 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm30 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
5753 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm2
5754 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm30, %zmm2
5755 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
5756 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm4
5757 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm7, %zmm4
5758 ; AVX512BW-NEXT: movw $6192, %cx # imm = 0x1830
5759 ; AVX512BW-NEXT: kmovd %ecx, %k1
5760 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm4 {%k1}
5761 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,2,3,4,5,30,u,u,9,10,11,12,31,u,u>
5762 ; AVX512BW-NEXT: vpermi2d %zmm28, %zmm4, %zmm0
5763 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5764 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
5765 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm2
5766 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm22, %zmm2
5767 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
5768 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm4
5769 ; AVX512BW-NEXT: vpermt2d %zmm21, %zmm13, %zmm4
5770 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm4 {%k1}
5771 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,27,u,u,4,5,6,7,28,u,u,11,12,13,14,29>
5772 ; AVX512BW-NEXT: vpermi2d %zmm28, %zmm4, %zmm0
5773 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5774 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
5775 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm16
5776 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm2, %zmm16
5777 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
5778 ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm6
5779 ; AVX512BW-NEXT: vpermt2d %zmm14, %zmm0, %zmm6
5780 ; AVX512BW-NEXT: movw $1548, %cx # imm = 0x60C
5781 ; AVX512BW-NEXT: kmovd %ecx, %k2
5782 ; AVX512BW-NEXT: vmovdqa32 %zmm16, %zmm6 {%k2}
5783 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm27 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
5784 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm31
5785 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm27, %zmm31
5786 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm25
5787 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm17 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
5788 ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm16
5789 ; AVX512BW-NEXT: vpermt2d %zmm18, %zmm17, %zmm16
5790 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm24
5791 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm4
5792 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm29
5793 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm26
5794 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm23
5795 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm2, %zmm21
5796 ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm5
5797 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm0, %zmm18
5798 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
5799 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm2, %zmm31
5800 ; AVX512BW-NEXT: movw $14448, %cx # imm = 0x3870
5801 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm18 {%k2}
5802 ; AVX512BW-NEXT: kmovd %ecx, %k2
5803 ; AVX512BW-NEXT: vmovdqa32 %zmm31, %zmm6 {%k2}
5804 ; AVX512BW-NEXT: vmovdqa64 %zmm28, %zmm31
5805 ; AVX512BW-NEXT: vmovdqa64 %zmm28, %zmm19
5806 ; AVX512BW-NEXT: vmovdqa64 %zmm28, %zmm21
5807 ; AVX512BW-NEXT: vmovdqa64 %zmm28, %zmm0
5808 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm27, %zmm28
5809 ; AVX512BW-NEXT: vmovdqa64 64(%rax), %zmm27
5810 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm2, %zmm28
5811 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm18 {%k2}
5812 ; AVX512BW-NEXT: vpermi2d %zmm12, %zmm1, %zmm30
5813 ; AVX512BW-NEXT: vpermi2d %zmm14, %zmm15, %zmm7
5814 ; AVX512BW-NEXT: vmovdqa32 %zmm30, %zmm7 {%k1}
5815 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
5816 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm8, %zmm2
5817 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm28 = <0,29,u,u,u,u,6,7,30,u,u,u,u,13,14,31>
5818 ; AVX512BW-NEXT: vpermi2d %zmm9, %zmm2, %zmm28
5819 ; AVX512BW-NEXT: movw $-7741, %ax # imm = 0xE1C3
5820 ; AVX512BW-NEXT: kmovd %eax, %k2
5821 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm7 {%k2}
5822 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
5823 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm2, %zmm25
5824 ; AVX512BW-NEXT: movw $-31994, %ax # imm = 0x8306
5825 ; AVX512BW-NEXT: kmovd %eax, %k2
5826 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm16 {%k2}
5827 ; AVX512BW-NEXT: vpermi2d %zmm14, %zmm15, %zmm22
5828 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm12, %zmm13
5829 ; AVX512BW-NEXT: vmovdqa32 %zmm22, %zmm13 {%k1}
5830 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
5831 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm3, %zmm22
5832 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
5833 ; AVX512BW-NEXT: vpermi2d %zmm9, %zmm22, %zmm25
5834 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
5835 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm22, %zmm31
5836 ; AVX512BW-NEXT: movw $-30962, %ax # imm = 0x870E
5837 ; AVX512BW-NEXT: kmovd %eax, %k1
5838 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm13 {%k1}
5839 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
5840 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm25, %zmm31
5841 ; AVX512BW-NEXT: movw $7224, %ax # imm = 0x1C38
5842 ; AVX512BW-NEXT: kmovd %eax, %k1
5843 ; AVX512BW-NEXT: vmovdqa32 %zmm31, %zmm16 {%k1}
5844 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm28 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
5845 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm28, %zmm24
5846 ; AVX512BW-NEXT: vpermi2d %zmm12, %zmm1, %zmm2
5847 ; AVX512BW-NEXT: vpermi2d %zmm15, %zmm14, %zmm17
5848 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm17 {%k2}
5849 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
5850 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm2, %zmm4
5851 ; AVX512BW-NEXT: movw $3096, %ax # imm = 0xC18
5852 ; AVX512BW-NEXT: kmovd %eax, %k2
5853 ; AVX512BW-NEXT: vmovdqa32 %zmm24, %zmm4 {%k2}
5854 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm3, %zmm22
5855 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm25, %zmm22
5856 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
5857 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm24, %zmm19
5858 ; AVX512BW-NEXT: vmovdqa32 %zmm22, %zmm17 {%k1}
5859 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
5860 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm22, %zmm19
5861 ; AVX512BW-NEXT: movw $28897, %ax # imm = 0x70E1
5862 ; AVX512BW-NEXT: kmovd %eax, %k3
5863 ; AVX512BW-NEXT: vmovdqa32 %zmm19, %zmm4 {%k3}
5864 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm19 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
5865 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm19, %zmm29
5866 ; AVX512BW-NEXT: vpermi2d %zmm12, %zmm1, %zmm28
5867 ; AVX512BW-NEXT: vpermi2d %zmm14, %zmm15, %zmm2
5868 ; AVX512BW-NEXT: vmovdqa32 %zmm28, %zmm2 {%k2}
5869 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
5870 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm25, %zmm26
5871 ; AVX512BW-NEXT: movw $12384, %ax # imm = 0x3060
5872 ; AVX512BW-NEXT: kmovd %eax, %k1
5873 ; AVX512BW-NEXT: vmovdqa32 %zmm29, %zmm26 {%k1}
5874 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm3, %zmm24
5875 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm22, %zmm24
5876 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm22 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
5877 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm22, %zmm21
5878 ; AVX512BW-NEXT: vmovdqa32 %zmm24, %zmm2 {%k3}
5879 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
5880 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm24, %zmm21
5881 ; AVX512BW-NEXT: movw $15480, %ax # imm = 0x3C78
5882 ; AVX512BW-NEXT: kmovd %eax, %k2
5883 ; AVX512BW-NEXT: vmovdqa32 %zmm26, %zmm21 {%k2}
5884 ; AVX512BW-NEXT: vpermi2d %zmm12, %zmm1, %zmm19
5885 ; AVX512BW-NEXT: vpermi2d %zmm14, %zmm15, %zmm25
5886 ; AVX512BW-NEXT: vmovdqa32 %zmm19, %zmm25 {%k1}
5887 ; AVX512BW-NEXT: vpermi2d %zmm8, %zmm3, %zmm22
5888 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm24, %zmm22
5889 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm22 {%k2}
5890 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm19 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
5891 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm19, %zmm5
5892 ; AVX512BW-NEXT: vpermt2d %zmm14, %zmm19, %zmm15
5893 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
5894 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm10, %zmm23
5895 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm10, %zmm1
5896 ; AVX512BW-NEXT: vmovdqa32 %zmm5, %zmm23 {%k1}
5897 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
5898 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm5, %zmm3
5899 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm5, %zmm0
5900 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
5901 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm5, %zmm3
5902 ; AVX512BW-NEXT: vpermt2d %zmm27, %zmm5, %zmm0
5903 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm1 {%k1}
5904 ; AVX512BW-NEXT: movw $3612, %ax # imm = 0xE1C
5905 ; AVX512BW-NEXT: kmovd %eax, %k1
5906 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm23 {%k1}
5907 ; AVX512BW-NEXT: vmovdqa32 %zmm3, %zmm1 {%k1}
5908 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <29,u,2,3,4,5,6,30,u,9,10,11,12,13,31,u>
5909 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
5910 ; AVX512BW-NEXT: vpermi2d %zmm20, %zmm3, %zmm0
5911 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,1,27,u,4,5,6,7,8,28,u,11,12,13,14,15>
5912 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
5913 ; AVX512BW-NEXT: vpermi2d %zmm20, %zmm5, %zmm3
5914 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
5915 ; AVX512BW-NEXT: vpermi2d %zmm27, %zmm0, %zmm5
5916 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,27,4,5,6,7,8,9,28,11,12,13,14,15]
5917 ; AVX512BW-NEXT: vpermi2d %zmm27, %zmm3, %zmm0
5918 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
5919 ; AVX512BW-NEXT: vmovdqa64 %zmm1, 64(%rax)
5920 ; AVX512BW-NEXT: vmovdqa64 %zmm22, 128(%rax)
5921 ; AVX512BW-NEXT: vmovdqa64 %zmm2, 192(%rax)
5922 ; AVX512BW-NEXT: vmovdqa64 %zmm17, 256(%rax)
5923 ; AVX512BW-NEXT: vmovdqa64 %zmm13, 320(%rax)
5924 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 384(%rax)
5925 ; AVX512BW-NEXT: vmovdqa64 %zmm18, 448(%rax)
5926 ; AVX512BW-NEXT: vmovdqa64 %zmm23, 512(%rax)
5927 ; AVX512BW-NEXT: vmovdqa64 %zmm21, 576(%rax)
5928 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 640(%rax)
5929 ; AVX512BW-NEXT: vmovdqa64 %zmm16, 704(%rax)
5930 ; AVX512BW-NEXT: vmovdqa64 %zmm6, (%rax)
5931 ; AVX512BW-NEXT: vmovdqa64 %zmm0, 768(%rax)
5932 ; AVX512BW-NEXT: vmovdqa64 %zmm5, 832(%rax)
5933 ; AVX512BW-NEXT: popq %rax
5934 ; AVX512BW-NEXT: vzeroupper
5935 ; AVX512BW-NEXT: retq
5936 %in.vec0 = load <32 x i32>, ptr %in.vecptr0, align 64
5937 %in.vec1 = load <32 x i32>, ptr %in.vecptr1, align 64
5938 %in.vec2 = load <32 x i32>, ptr %in.vecptr2, align 64
5939 %in.vec3 = load <32 x i32>, ptr %in.vecptr3, align 64
5940 %in.vec4 = load <32 x i32>, ptr %in.vecptr4, align 64
5941 %in.vec5 = load <32 x i32>, ptr %in.vecptr5, align 64
5942 %in.vec6 = load <32 x i32>, ptr %in.vecptr6, align 64
5943 %1 = shufflevector <32 x i32> %in.vec0, <32 x i32> %in.vec1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
5944 %2 = shufflevector <32 x i32> %in.vec2, <32 x i32> %in.vec3, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
5945 %3 = shufflevector <32 x i32> %in.vec4, <32 x i32> %in.vec5, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
5946 %4 = shufflevector <64 x i32> %1, <64 x i32> %2, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
5947 %5 = shufflevector <32 x i32> %in.vec6, <32 x i32> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5948 %6 = shufflevector <64 x i32> %3, <64 x i32> %5, <96 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
5949 %7 = shufflevector <96 x i32> %6, <96 x i32> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5950 %8 = shufflevector <128 x i32> %4, <128 x i32> %7, <224 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223>
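; The final shuffle below performs the stride-7 interleave: the <224 x i32> mask takes
; index 0 of each of the seven concatenated 32-element inputs (0, 32, 64, 96, 128, 160, 192),
; then index 1 of each (1, 33, 65, ...), and so on, so the store writes the seven fields of
; each lane contiguously.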
5951 %interleaved.vec = shufflevector <224 x i32> %8, <224 x i32> poison, <224 x i32> <i32 0, i32 32, i32 64, i32 96, i32 128, i32 160, i32 192, i32 1, i32 33, i32 65, i32 97, i32 129, i32 161, i32 193, i32 2, i32 34, i32 66, i32 98, i32 130, i32 162, i32 194, i32 3, i32 35, i32 67, i32 99, i32 131, i32 163, i32 195, i32 4, i32 36, i32 68, i32 100, i32 132, i32 164, i32 196, i32 5, i32 37, i32 69, i32 101, i32 133, i32 165, i32 197, i32 6, i32 38, i32 70, i32 102, i32 134, i32 166, i32 198, i32 7, i32 39, i32 71, i32 103, i32 135, i32 167, i32 199, i32 8, i32 40, i32 72, i32 104, i32 136, i32 168, i32 200, i32 9, i32 41, i32 73, i32 105, i32 137, i32 169, i32 201, i32 10, i32 42, i32 74, i32 106, i32 138, i32 170, i32 202, i32 11, i32 43, i32 75, i32 107, i32 139, i32 171, i32 203, i32 12, i32 44, i32 76, i32 108, i32 140, i32 172, i32 204, i32 13, i32 45, i32 77, i32 109, i32 141, i32 173, i32 205, i32 14, i32 46, i32 78, i32 110, i32 142, i32 174, i32 206, i32 15, i32 47, i32 79, i32 111, i32 143, i32 175, i32 207, i32 16, i32 48, i32 80, i32 112, i32 144, i32 176, i32 208, i32 17, i32 49, i32 81, i32 113, i32 145, i32 177, i32 209, i32 18, i32 50, i32 82, i32 114, i32 146, i32 178, i32 210, i32 19, i32 51, i32 83, i32 115, i32 147, i32 179, i32 211, i32 20, i32 52, i32 84, i32 116, i32 148, i32 180, i32 212, i32 21, i32 53, i32 85, i32 117, i32 149, i32 181, i32 213, i32 22, i32 54, i32 86, i32 118, i32 150, i32 182, i32 214, i32 23, i32 55, i32 87, i32 119, i32 151, i32 183, i32 215, i32 24, i32 56, i32 88, i32 120, i32 152, i32 184, i32 216, i32 25, i32 57, i32 89, i32 121, i32 153, i32 185, i32 217, i32 26, i32 58, i32 90, i32 122, i32 154, i32 186, i32 218, i32 27, i32 59, i32 91, i32 123, i32 155, i32 187, i32 219, i32 28, i32 60, i32 92, i32 124, i32 156, i32 188, i32 220, i32 29, i32 61, i32 93, i32 125, i32 157, i32 189, i32 221, i32 30, i32 62, i32 94, i32 126, i32 158, i32 190, i32 222, i32 31, i32 63, i32 95, i32 127, i32 159, i32 191, i32 223>
5952 store <224 x i32> %interleaved.vec, ptr %out.vec, align 64
5953 ret void
5954 }
5956 define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
5957 ; SSE-LABEL: store_i32_stride7_vf64:
5959 ; SSE-NEXT: subq $2760, %rsp # imm = 0xAC8
5960 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
5961 ; SSE-NEXT: movdqa (%rdi), %xmm6
5962 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5963 ; SSE-NEXT: movdqa (%rsi), %xmm5
5964 ; SSE-NEXT: movdqa 16(%rsi), %xmm4
5965 ; SSE-NEXT: movaps (%rdx), %xmm3
5966 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5967 ; SSE-NEXT: movdqa 16(%rdx), %xmm7
5968 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5969 ; SSE-NEXT: movaps (%rcx), %xmm9
5970 ; SSE-NEXT: movaps 16(%rcx), %xmm10
5971 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5972 ; SSE-NEXT: movaps (%r8), %xmm0
5973 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5974 ; SSE-NEXT: movaps 16(%r8), %xmm8
5975 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5976 ; SSE-NEXT: movdqa (%r9), %xmm11
5977 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5978 ; SSE-NEXT: movdqa 16(%r9), %xmm12
5979 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5980 ; SSE-NEXT: movdqa (%rax), %xmm15
5981 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm9[1,1]
5982 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5983 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,1,1]
5984 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
5985 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
5986 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5987 ; SSE-NEXT: movdqa %xmm6, %xmm0
5988 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
5989 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[1,1,1,1]
5990 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5991 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[1,1,1,1]
5992 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5993 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
5994 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5995 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
5996 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5997 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
5998 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5999 ; SSE-NEXT: movaps %xmm8, %xmm0
6000 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm10[1,1]
6001 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6002 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6003 ; SSE-NEXT: movdqa 16(%rax), %xmm0
6004 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6005 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6006 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[1,1,1,1]
6007 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6008 ; SSE-NEXT: movdqa 16(%rdi), %xmm5
6009 ; SSE-NEXT: movdqa %xmm5, %xmm0
6010 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6011 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6012 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6013 ; SSE-NEXT: movdqa 32(%rsi), %xmm1
6014 ; SSE-NEXT: movaps 32(%rdx), %xmm4
6015 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6016 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6017 ; SSE-NEXT: movdqa %xmm1, %xmm2
6018 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6019 ; SSE-NEXT: movaps %xmm4, %xmm1
6020 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6021 ; SSE-NEXT: movaps 32(%rcx), %xmm4
6022 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6023 ; SSE-NEXT: movaps 32(%r8), %xmm0
6024 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6025 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6026 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6027 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6028 ; SSE-NEXT: movdqa 32(%r9), %xmm1
6029 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6030 ; SSE-NEXT: movdqa 32(%rax), %xmm0
6031 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6032 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6033 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6034 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6035 ; SSE-NEXT: movdqa 32(%rdi), %xmm6
6036 ; SSE-NEXT: movdqa %xmm6, %xmm0
6037 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6038 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6039 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6040 ; SSE-NEXT: movdqa 48(%rsi), %xmm2
6041 ; SSE-NEXT: movdqa 48(%rdx), %xmm0
6042 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6043 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6044 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6045 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6046 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6047 ; SSE-NEXT: movaps 48(%rcx), %xmm4
6048 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6049 ; SSE-NEXT: movaps 48(%r8), %xmm7
6050 ; SSE-NEXT: movaps %xmm7, %xmm0
6051 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6052 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6053 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6054 ; SSE-NEXT: movdqa 48(%r9), %xmm1
6055 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6056 ; SSE-NEXT: movdqa 48(%rax), %xmm0
6057 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6058 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6059 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6060 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6061 ; SSE-NEXT: movdqa 48(%rdi), %xmm10
6062 ; SSE-NEXT: movdqa %xmm10, %xmm0
6063 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6064 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6065 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6066 ; SSE-NEXT: movdqa 64(%rsi), %xmm1
6067 ; SSE-NEXT: movaps 64(%rdx), %xmm4
6068 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6069 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6070 ; SSE-NEXT: movdqa %xmm1, %xmm2
6071 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6072 ; SSE-NEXT: movaps %xmm4, %xmm1
6073 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6074 ; SSE-NEXT: movaps 64(%rcx), %xmm4
6075 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6076 ; SSE-NEXT: movaps 64(%r8), %xmm13
6077 ; SSE-NEXT: movaps %xmm13, %xmm0
6078 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6079 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6080 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6081 ; SSE-NEXT: movdqa 64(%r9), %xmm1
6082 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6083 ; SSE-NEXT: movdqa 64(%rax), %xmm0
6084 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6085 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6086 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6087 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6088 ; SSE-NEXT: movdqa 64(%rdi), %xmm14
6089 ; SSE-NEXT: movdqa %xmm14, %xmm0
6090 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6091 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6092 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6093 ; SSE-NEXT: movdqa 80(%rsi), %xmm2
6094 ; SSE-NEXT: movdqa 80(%rdx), %xmm0
6095 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6096 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6097 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6098 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6099 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6100 ; SSE-NEXT: movaps 80(%rcx), %xmm4
6101 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6102 ; SSE-NEXT: movaps 80(%r8), %xmm0
6103 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6104 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6105 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6106 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6107 ; SSE-NEXT: movdqa 80(%r9), %xmm1
6108 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6109 ; SSE-NEXT: movdqa 80(%rax), %xmm0
6110 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6111 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6112 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6113 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6114 ; SSE-NEXT: movdqa 80(%rdi), %xmm0
6115 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6116 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6117 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6118 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6119 ; SSE-NEXT: movdqa 96(%rsi), %xmm1
6120 ; SSE-NEXT: movaps 96(%rdx), %xmm4
6121 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6122 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6123 ; SSE-NEXT: movdqa %xmm1, %xmm2
6124 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6125 ; SSE-NEXT: movaps %xmm4, %xmm1
6126 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6127 ; SSE-NEXT: movaps 96(%rcx), %xmm4
6128 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6129 ; SSE-NEXT: movaps 96(%r8), %xmm0
6130 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6131 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6132 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6133 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6134 ; SSE-NEXT: movdqa 96(%r9), %xmm1
6135 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6136 ; SSE-NEXT: movdqa 96(%rax), %xmm0
6137 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6138 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6139 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6140 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6141 ; SSE-NEXT: movdqa 96(%rdi), %xmm0
6142 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6143 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6144 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6145 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6146 ; SSE-NEXT: movdqa 112(%rsi), %xmm2
6147 ; SSE-NEXT: movdqa 112(%rdx), %xmm0
6148 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6149 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6150 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6151 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6152 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6153 ; SSE-NEXT: movaps 112(%rcx), %xmm4
6154 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6155 ; SSE-NEXT: movaps 112(%r8), %xmm0
6156 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6157 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6158 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6159 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6160 ; SSE-NEXT: movdqa 112(%r9), %xmm1
6161 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6162 ; SSE-NEXT: movdqa 112(%rax), %xmm0
6163 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6164 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6165 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6166 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6167 ; SSE-NEXT: movdqa 112(%rdi), %xmm0
6168 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6169 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6170 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6171 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6172 ; SSE-NEXT: movdqa 128(%rsi), %xmm1
6173 ; SSE-NEXT: movaps 128(%rdx), %xmm4
6174 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6175 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6176 ; SSE-NEXT: movdqa %xmm1, %xmm2
6177 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6178 ; SSE-NEXT: movaps %xmm4, %xmm1
6179 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6180 ; SSE-NEXT: movaps 128(%rcx), %xmm4
6181 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6182 ; SSE-NEXT: movaps 128(%r8), %xmm0
6183 ; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
6184 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6185 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6186 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6187 ; SSE-NEXT: movdqa 128(%r9), %xmm1
6188 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6189 ; SSE-NEXT: movdqa 128(%rax), %xmm0
6190 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6191 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6192 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6193 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6194 ; SSE-NEXT: movdqa 128(%rdi), %xmm0
6195 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6196 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6197 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6198 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6199 ; SSE-NEXT: movdqa 144(%rsi), %xmm2
6200 ; SSE-NEXT: movdqa 144(%rdx), %xmm0
6201 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6202 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6203 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6204 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6205 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6206 ; SSE-NEXT: movaps 144(%rcx), %xmm4
6207 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6208 ; SSE-NEXT: movaps 144(%r8), %xmm0
6209 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6210 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6211 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6212 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6213 ; SSE-NEXT: movdqa 144(%r9), %xmm1
6214 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6215 ; SSE-NEXT: movdqa 144(%rax), %xmm0
6216 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6217 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6218 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6219 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6220 ; SSE-NEXT: movdqa 144(%rdi), %xmm0
6221 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6222 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6223 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6224 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6225 ; SSE-NEXT: movdqa 160(%rsi), %xmm1
6226 ; SSE-NEXT: movaps 160(%rdx), %xmm4
6227 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6228 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6229 ; SSE-NEXT: movdqa %xmm1, %xmm2
6230 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6231 ; SSE-NEXT: movaps %xmm4, %xmm1
6232 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6233 ; SSE-NEXT: movaps 160(%rcx), %xmm4
6234 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6235 ; SSE-NEXT: movaps 160(%r8), %xmm0
6236 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6237 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6238 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6239 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6240 ; SSE-NEXT: movdqa 160(%r9), %xmm1
6241 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6242 ; SSE-NEXT: movdqa 160(%rax), %xmm0
6243 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6244 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6245 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6246 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6247 ; SSE-NEXT: movdqa 160(%rdi), %xmm0
6248 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6249 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6250 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6251 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6252 ; SSE-NEXT: movdqa 176(%rsi), %xmm2
6253 ; SSE-NEXT: movdqa 176(%rdx), %xmm0
6254 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6255 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6256 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
6257 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6258 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6259 ; SSE-NEXT: movaps 176(%rcx), %xmm4
6260 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6261 ; SSE-NEXT: movaps 176(%r8), %xmm0
6262 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6263 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6264 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6265 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6266 ; SSE-NEXT: movdqa 176(%r9), %xmm1
6267 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6268 ; SSE-NEXT: movdqa 176(%rax), %xmm0
6269 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6270 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6271 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6272 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6273 ; SSE-NEXT: movdqa 176(%rdi), %xmm0
6274 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6275 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6276 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6277 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6278 ; SSE-NEXT: movdqa 192(%rsi), %xmm1
6279 ; SSE-NEXT: movaps 192(%rdx), %xmm4
6280 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6281 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
6282 ; SSE-NEXT: movdqa %xmm1, %xmm2
6283 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6284 ; SSE-NEXT: movaps %xmm4, %xmm1
6285 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6286 ; SSE-NEXT: movaps 192(%rcx), %xmm4
6287 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6288 ; SSE-NEXT: movaps 192(%r8), %xmm0
6289 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6290 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm4[1,1]
6291 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6292 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6293 ; SSE-NEXT: movdqa 192(%r9), %xmm1
6294 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6295 ; SSE-NEXT: movdqa 192(%rax), %xmm0
6296 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6297 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6298 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6299 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6300 ; SSE-NEXT: movdqa 192(%rdi), %xmm0
6301 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6302 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6303 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6304 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6305 ; SSE-NEXT: movdqa 208(%rsi), %xmm3
6306 ; SSE-NEXT: movdqa 208(%rdx), %xmm0
6307 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6308 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6309 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
6310 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6311 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6312 ; SSE-NEXT: movaps 208(%rcx), %xmm8
6313 ; SSE-NEXT: movaps 208(%r8), %xmm4
6314 ; SSE-NEXT: movaps %xmm4, %xmm0
6315 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6316 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm8[1,1]
6317 ; SSE-NEXT: movaps %xmm8, %xmm11
6318 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6319 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6320 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6321 ; SSE-NEXT: movdqa 208(%r9), %xmm12
6322 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6323 ; SSE-NEXT: movdqa 208(%rax), %xmm8
6324 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6325 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
6326 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[1,1,1,1]
6327 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6328 ; SSE-NEXT: movdqa 208(%rdi), %xmm0
6329 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6330 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6331 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6332 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6333 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,3,3,3]
6334 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[3,3,3,3]
6335 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6336 ; SSE-NEXT: movdqa %xmm8, %xmm0
6337 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm12[3,3]
6338 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
6339 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6340 ; SSE-NEXT: movdqa 224(%rsi), %xmm0
6341 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6342 ; SSE-NEXT: movaps 224(%rdx), %xmm2
6343 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6344 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6345 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
6346 ; SSE-NEXT: movaps 224(%rcx), %xmm3
6347 ; SSE-NEXT: movaps 224(%r8), %xmm0
6348 ; SSE-NEXT: movaps %xmm0, %xmm1
6349 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[1,1]
6350 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6351 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
6352 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6353 ; SSE-NEXT: movaps %xmm0, %xmm1
6354 ; SSE-NEXT: movaps 224(%r9), %xmm11
6355 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm11[0]
6356 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6357 ; SSE-NEXT: movaps %xmm11, %xmm1
6358 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6359 ; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm0[1]
6360 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
6361 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,3,3,3]
6362 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
6363 ; SSE-NEXT: movaps 224(%rax), %xmm0
6364 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6365 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
6366 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
6367 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6368 ; SSE-NEXT: movdqa 240(%rsi), %xmm1
6369 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6370 ; SSE-NEXT: movdqa 240(%rdx), %xmm0
6371 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6372 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6373 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
6374 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
6375 ; SSE-NEXT: movaps 240(%rcx), %xmm8
6376 ; SSE-NEXT: movaps 240(%r8), %xmm0
6377 ; SSE-NEXT: movaps %xmm0, %xmm1
6378 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm8[1,1]
6379 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
6380 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6381 ; SSE-NEXT: movaps %xmm0, %xmm1
6382 ; SSE-NEXT: movaps 240(%r9), %xmm12
6383 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm12[0]
6384 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6385 ; SSE-NEXT: movaps %xmm12, %xmm1
6386 ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6387 ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm0[1]
6388 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
6389 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[3,3,3,3]
6390 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
6391 ; SSE-NEXT: movaps 240(%rax), %xmm0
6392 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6393 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
6394 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
6395 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6396 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6397 ; SSE-NEXT: movaps %xmm1, %xmm0
6398 ; SSE-NEXT: movaps %xmm9, %xmm2
6399 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6400 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
6401 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6402 ; SSE-NEXT: movaps %xmm4, %xmm3
6403 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6404 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6405 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6406 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6407 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm4[1,3]
6408 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6409 ; SSE-NEXT: movaps %xmm4, %xmm3
6410 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
6411 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1]
6412 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm15[0,2]
6413 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6414 ; SSE-NEXT: movaps %xmm4, %xmm0
6415 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm9[2],xmm0[3],xmm9[3]
6416 ; SSE-NEXT: movaps %xmm1, %xmm3
6417 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
6418 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6419 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6420 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6421 ; SSE-NEXT: movaps %xmm1, %xmm0
6422 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6423 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
6424 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6425 ; SSE-NEXT: movdqa %xmm5, %xmm2
6426 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6427 ; SSE-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
6428 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
6429 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6430 ; SSE-NEXT: movdqa %xmm5, %xmm0
6431 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6432 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6433 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6434 ; SSE-NEXT: movaps %xmm2, %xmm5
6435 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6436 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
6437 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
6438 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6439 ; SSE-NEXT: movaps %xmm4, %xmm5
6440 ; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
6441 ; SSE-NEXT: movaps %xmm2, %xmm0
6442 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6443 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
6444 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6445 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6446 ; SSE-NEXT: movdqa %xmm2, %xmm0
6447 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6448 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
6449 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6450 ; SSE-NEXT: movdqa %xmm6, %xmm3
6451 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6452 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6453 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6454 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6455 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6456 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[1,3]
6457 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6458 ; SSE-NEXT: movaps %xmm4, %xmm6
6459 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6460 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
6461 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[0,2]
6462 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6463 ; SSE-NEXT: movaps %xmm4, %xmm0
6464 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6465 ; SSE-NEXT: movdqa %xmm2, %xmm3
6466 ; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
6467 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6468 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6469 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6470 ; SSE-NEXT: movaps %xmm2, %xmm0
6471 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6472 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
6473 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6474 ; SSE-NEXT: movdqa %xmm10, %xmm3
6475 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6476 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6477 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
6478 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6479 ; SSE-NEXT: movdqa %xmm10, %xmm0
6480 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6481 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6482 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6483 ; SSE-NEXT: movaps %xmm7, %xmm5
6484 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6485 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
6486 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
6487 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6488 ; SSE-NEXT: movaps %xmm1, %xmm5
6489 ; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm2[2],xmm5[3],xmm2[3]
6490 ; SSE-NEXT: movaps %xmm7, %xmm0
6491 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6492 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
6493 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6494 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6495 ; SSE-NEXT: movdqa %xmm2, %xmm0
6496 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6497 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
6498 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6499 ; SSE-NEXT: movdqa %xmm14, %xmm3
6500 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6501 ; SSE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
6502 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6503 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6504 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6505 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm14[1,3]
6506 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6507 ; SSE-NEXT: movaps %xmm13, %xmm5
6508 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6509 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
6510 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[0,2]
6511 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6512 ; SSE-NEXT: movaps %xmm13, %xmm0
6513 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6514 ; SSE-NEXT: movdqa %xmm2, %xmm3
6515 ; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
6516 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6517 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6518 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6519 ; SSE-NEXT: movaps %xmm3, %xmm0
6520 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6521 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
6522 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6523 ; SSE-NEXT: movaps %xmm1, %xmm5
6524 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6525 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6526 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
6527 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6528 ; SSE-NEXT: movaps %xmm1, %xmm0
6529 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6530 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6531 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6532 ; SSE-NEXT: movaps %xmm1, %xmm6
6533 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6534 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
6535 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[2,0]
6536 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6537 ; SSE-NEXT: movaps %xmm2, %xmm6
6538 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
6539 ; SSE-NEXT: movaps %xmm1, %xmm0
6540 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6541 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm0[0]
6542 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6543 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6544 ; SSE-NEXT: movaps %xmm3, %xmm0
6545 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6546 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
6547 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6548 ; SSE-NEXT: movaps %xmm1, %xmm5
6549 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6550 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6551 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
6552 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6553 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6554 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
6555 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6556 ; SSE-NEXT: movaps %xmm1, %xmm6
6557 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6558 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
6559 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[0,2]
6560 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6561 ; SSE-NEXT: movaps %xmm1, %xmm0
6562 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6563 ; SSE-NEXT: movaps %xmm3, %xmm1
6564 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
6565 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6566 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6567 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6568 ; SSE-NEXT: movaps %xmm3, %xmm0
6569 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6570 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
6571 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6572 ; SSE-NEXT: movaps %xmm1, %xmm5
6573 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6574 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6575 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
6576 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6577 ; SSE-NEXT: movaps %xmm1, %xmm0
6578 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6579 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6580 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6581 ; SSE-NEXT: movaps %xmm1, %xmm6
6582 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6583 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
6584 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[2,0]
6585 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6586 ; SSE-NEXT: movaps %xmm2, %xmm6
6587 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
6588 ; SSE-NEXT: movaps %xmm1, %xmm0
6589 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6590 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm0[0]
6591 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6592 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6593 ; SSE-NEXT: movaps %xmm3, %xmm0
6594 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6595 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
6596 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6597 ; SSE-NEXT: movaps %xmm1, %xmm5
6598 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6599 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6600 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
6601 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6602 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6603 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
6604 ; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
6605 ; SSE-NEXT: movaps %xmm1, %xmm6
6606 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6607 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
6608 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[0,2]
6609 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6610 ; SSE-NEXT: movaps %xmm1, %xmm0
6611 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6612 ; SSE-NEXT: movaps %xmm3, %xmm1
6613 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
6614 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6615 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6616 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6617 ; SSE-NEXT: movaps %xmm3, %xmm0
6618 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6619 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
6620 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6621 ; SSE-NEXT: movaps %xmm1, %xmm5
6622 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6623 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6624 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
6625 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6626 ; SSE-NEXT: movaps %xmm1, %xmm0
6627 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6628 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6629 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6630 ; SSE-NEXT: movaps %xmm1, %xmm6
6631 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6632 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
6633 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[2,0]
6634 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6635 ; SSE-NEXT: movaps %xmm2, %xmm6
6636 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
6637 ; SSE-NEXT: movaps %xmm1, %xmm0
6638 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6639 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm0[0]
6640 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6641 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6642 ; SSE-NEXT: movaps %xmm3, %xmm0
6643 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6644 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
6645 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6646 ; SSE-NEXT: movaps %xmm1, %xmm5
6647 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
6648 ; SSE-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
6649 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
6650 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6651 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6652 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
6653 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6654 ; SSE-NEXT: movaps %xmm1, %xmm6
6655 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6656 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
6657 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[0,2]
6658 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6659 ; SSE-NEXT: movaps %xmm1, %xmm0
6660 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6661 ; SSE-NEXT: movaps %xmm3, %xmm1
6662 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
6663 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
6664 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6665 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6666 ; SSE-NEXT: movaps %xmm3, %xmm0
6667 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6668 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
6669 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6670 ; SSE-NEXT: movaps %xmm1, %xmm4
6671 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6672 ; SSE-NEXT: # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1]
6673 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[2,0]
6674 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6675 ; SSE-NEXT: movaps %xmm1, %xmm0
6676 ; SSE-NEXT: shufps $197, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6677 ; SSE-NEXT: # xmm0 = xmm0[1,1],mem[0,3]
6678 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6679 ; SSE-NEXT: movaps %xmm1, %xmm4
6680 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6681 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
6682 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[2,0]
6683 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6684 ; SSE-NEXT: movaps %xmm2, %xmm4
6685 ; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
6686 ; SSE-NEXT: movaps %xmm1, %xmm0
6687 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
6688 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
6689 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6690 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
6691 ; SSE-NEXT: movaps %xmm15, %xmm0
6692 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6693 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
6694 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6695 ; SSE-NEXT: movaps %xmm1, %xmm4
6696 ; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6697 ; SSE-NEXT: # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1]
6698 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
6699 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6700 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6701 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
6702 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6703 ; SSE-NEXT: movaps %xmm1, %xmm5
6704 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6705 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
6706 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[0,2]
6707 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6708 ; SSE-NEXT: movaps %xmm1, %xmm0
6709 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
6710 ; SSE-NEXT: unpckhps {{.*#+}} xmm15 = xmm15[2],xmm2[2],xmm15[3],xmm2[3]
6711 ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm0[0]
6712 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
6713 ; SSE-NEXT: movaps %xmm14, %xmm0
6714 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6715 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
6716 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,1],xmm14[3,3]
6717 ; SSE-NEXT: movaps %xmm1, %xmm4
6718 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6719 ; SSE-NEXT: movaps %xmm2, %xmm13
6720 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6721 ; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm3[2],xmm13[3],xmm3[3]
6722 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0]
6723 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6724 ; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
6725 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,1],xmm3[2,0]
6726 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6727 ; SSE-NEXT: movaps %xmm3, %xmm10
6728 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6729 ; SSE-NEXT: unpcklps {{.*#+}} xmm10 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
6730 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,1],xmm0[2,0]
6731 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
6732 ; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm3[0]
6733 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
6734 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm2[2,0]
6735 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6736 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6737 ; SSE-NEXT: movaps %xmm2, %xmm0
6738 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6739 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
6740 ; SSE-NEXT: movaps 224(%rdi), %xmm6
6741 ; SSE-NEXT: movaps %xmm6, %xmm9
6742 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6743 ; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm3[0],xmm9[1],xmm3[1]
6744 ; SSE-NEXT: movlhps {{.*#+}} xmm9 = xmm9[0],xmm0[0]
6745 ; SSE-NEXT: movaps %xmm6, %xmm0
6746 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6747 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
6748 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6749 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm0[2,0]
6750 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6751 ; SSE-NEXT: movaps %xmm6, %xmm0
6752 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
6753 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6754 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1]
6755 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm0[0,1]
6756 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6757 ; SSE-NEXT: movaps %xmm2, %xmm7
6758 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm4[2],xmm7[3],xmm4[3]
6759 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm11[2,0]
6760 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
6761 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,1],xmm1[2,3]
6762 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm2[2,0]
6763 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6764 ; SSE-NEXT: movaps %xmm0, %xmm11
6765 ; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm8[2],xmm11[3],xmm8[3]
6766 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0]
6767 ; SSE-NEXT: movaps %xmm0, %xmm4
6768 ; SSE-NEXT: movaps 240(%rdi), %xmm3
6769 ; SSE-NEXT: movaps %xmm3, %xmm5
6770 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6771 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
6772 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm8[2,0]
6773 ; SSE-NEXT: movaps %xmm3, %xmm8
6774 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6775 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm2[0,3]
6776 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6777 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[2,0]
6778 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6779 ; SSE-NEXT: movaps %xmm3, %xmm8
6780 ; SSE-NEXT: unpckhps {{.*#+}} xmm8 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
6781 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6782 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
6783 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[0,1]
6784 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6785 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm12[2,0]
6786 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,3],xmm1[3,3]
6787 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3]
6788 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,0]
6789 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6790 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6791 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
6792 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6793 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,0]
6794 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
6795 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm12[2,3,2,3]
6796 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
6797 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6798 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
6799 ; SSE-NEXT: # xmm12 = xmm12[3,3],mem[3,3]
6800 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6801 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm12[2,0]
6802 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6803 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6804 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
6805 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6806 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6807 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6808 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6809 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6810 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6811 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6812 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6813 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6814 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6815 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6816 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6817 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6818 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6819 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6820 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6821 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6822 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6823 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6824 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6825 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6826 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6827 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6828 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6829 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6830 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6831 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6832 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6833 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6834 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6835 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6836 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6837 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6838 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6839 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6840 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6841 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6842 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6843 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6844 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6845 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6846 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6847 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6848 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6849 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6850 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6851 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6852 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6853 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6854 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6855 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6856 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6857 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6858 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6859 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6860 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6861 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[2,0]
6862 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6863 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6864 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm4[0],xmm2[1,2,3]
6865 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6866 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6867 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6868 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6869 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6870 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6871 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6872 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6873 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6874 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6875 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6876 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6877 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6878 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6879 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6880 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6881 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6882 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6883 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6884 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6885 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6886 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6887 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6888 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6889 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6890 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6891 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6892 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6893 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6894 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6895 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6896 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6897 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6898 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6899 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6900 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6901 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6902 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6903 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6904 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6905 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6906 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6907 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6908 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6909 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6910 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6911 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6912 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6913 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6914 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6915 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6916 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6917 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6918 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6919 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6920 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6921 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6922 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6923 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6924 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6925 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6926 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6927 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6928 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6929 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6930 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6931 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6932 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6933 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6934 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6935 ; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
6936 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6937 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6938 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6939 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6940 ; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
6941 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6942 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6943 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6944 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6945 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6946 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6947 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6948 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6949 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6950 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6951 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6952 ; SSE-NEXT: movaps %xmm0, %xmm4
6953 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6954 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm4[2,0]
6955 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6956 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6957 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
6958 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6959 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6960 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6961 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6962 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6963 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6964 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6965 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6966 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6967 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6968 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6969 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6970 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6971 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6972 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6973 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6974 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6975 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6976 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6977 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6978 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6979 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6980 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[2,0]
6981 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6982 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
6983 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
6984 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6985 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6986 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6987 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
6988 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,3],xmm0[2,0]
6989 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6990 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
6991 ; SSE-NEXT: movss {{.*#+}} xmm12 = xmm4[0],xmm12[1,2,3]
6992 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6993 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6994 ; SSE-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
6995 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
6996 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,3],xmm0[2,0]
6997 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6998 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
6999 ; SSE-NEXT: movss {{.*#+}} xmm8 = xmm4[0],xmm8[1,2,3]
7000 ; SSE-NEXT: shufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7001 ; SSE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
7002 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7003 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,0]
7004 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
7005 ; SSE-NEXT: # xmm4 = mem[3,3,3,3]
7006 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
7007 ; SSE-NEXT: movaps %xmm0, %xmm4
7008 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
7009 ; SSE-NEXT: movaps %xmm3, 1760(%rax)
7010 ; SSE-NEXT: movaps %xmm11, 1744(%rax)
7011 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7012 ; SSE-NEXT: movaps %xmm0, 1728(%rax)
7013 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7014 ; SSE-NEXT: movaps %xmm0, 1696(%rax)
7015 ; SSE-NEXT: movaps %xmm5, 1680(%rax)
7016 ; SSE-NEXT: movaps %xmm6, 1648(%rax)
7017 ; SSE-NEXT: movaps %xmm7, 1632(%rax)
7018 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7019 ; SSE-NEXT: movaps %xmm0, 1616(%rax)
7020 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7021 ; SSE-NEXT: movaps %xmm0, 1584(%rax)
7022 ; SSE-NEXT: movaps %xmm9, 1568(%rax)
7023 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7024 ; SSE-NEXT: movaps %xmm0, 1536(%rax)
7025 ; SSE-NEXT: movaps %xmm13, 1520(%rax)
7026 ; SSE-NEXT: movaps %xmm10, 1472(%rax)
7027 ; SSE-NEXT: movaps %xmm14, 1456(%rax)
7028 ; SSE-NEXT: movaps %xmm15, 1408(%rax)
7029 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7030 ; SSE-NEXT: movaps %xmm0, 1360(%rax)
7031 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7032 ; SSE-NEXT: movaps %xmm0, 1344(%rax)
7033 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7034 ; SSE-NEXT: movaps %xmm0, 1296(%rax)
7035 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7036 ; SSE-NEXT: movaps %xmm0, 1248(%rax)
7037 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7038 ; SSE-NEXT: movaps %xmm0, 1232(%rax)
7039 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7040 ; SSE-NEXT: movaps %xmm0, 1184(%rax)
7041 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7042 ; SSE-NEXT: movaps %xmm0, 1136(%rax)
7043 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7044 ; SSE-NEXT: movaps %xmm0, 1120(%rax)
7045 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7046 ; SSE-NEXT: movaps %xmm0, 1072(%rax)
7047 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7048 ; SSE-NEXT: movaps %xmm0, 1024(%rax)
7049 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7050 ; SSE-NEXT: movaps %xmm0, 1008(%rax)
7051 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7052 ; SSE-NEXT: movaps %xmm0, 960(%rax)
7053 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7054 ; SSE-NEXT: movaps %xmm0, 912(%rax)
7055 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7056 ; SSE-NEXT: movaps %xmm0, 896(%rax)
7057 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7058 ; SSE-NEXT: movaps %xmm0, 848(%rax)
7059 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7060 ; SSE-NEXT: movaps %xmm0, 800(%rax)
7061 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7062 ; SSE-NEXT: movaps %xmm0, 784(%rax)
7063 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7064 ; SSE-NEXT: movaps %xmm0, 736(%rax)
7065 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7066 ; SSE-NEXT: movaps %xmm0, 688(%rax)
7067 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7068 ; SSE-NEXT: movaps %xmm0, 672(%rax)
7069 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7070 ; SSE-NEXT: movaps %xmm0, 624(%rax)
7071 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7072 ; SSE-NEXT: movaps %xmm0, 576(%rax)
7073 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7074 ; SSE-NEXT: movaps %xmm0, 560(%rax)
7075 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7076 ; SSE-NEXT: movaps %xmm0, 512(%rax)
7077 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7078 ; SSE-NEXT: movaps %xmm0, 464(%rax)
7079 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7080 ; SSE-NEXT: movaps %xmm0, 448(%rax)
7081 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7082 ; SSE-NEXT: movaps %xmm0, 400(%rax)
7083 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7084 ; SSE-NEXT: movaps %xmm0, 352(%rax)
7085 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7086 ; SSE-NEXT: movaps %xmm0, 336(%rax)
7087 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7088 ; SSE-NEXT: movaps %xmm0, 288(%rax)
7089 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7090 ; SSE-NEXT: movaps %xmm0, 240(%rax)
7091 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7092 ; SSE-NEXT: movaps %xmm0, 224(%rax)
7093 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7094 ; SSE-NEXT: movaps %xmm0, 176(%rax)
7095 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7096 ; SSE-NEXT: movaps %xmm0, 128(%rax)
7097 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7098 ; SSE-NEXT: movaps %xmm0, 112(%rax)
7099 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7100 ; SSE-NEXT: movaps %xmm0, 64(%rax)
7101 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7102 ; SSE-NEXT: movaps %xmm0, 16(%rax)
7103 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7104 ; SSE-NEXT: movaps %xmm0, (%rax)
7105 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7106 ; SSE-NEXT: movaps %xmm0, 1776(%rax)
7107 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7108 ; SSE-NEXT: movaps %xmm0, 1712(%rax)
7109 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7110 ; SSE-NEXT: movaps %xmm0, 1664(%rax)
7111 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7112 ; SSE-NEXT: movaps %xmm0, 1600(%rax)
7113 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7114 ; SSE-NEXT: movaps %xmm0, 1552(%rax)
7115 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7116 ; SSE-NEXT: movaps %xmm0, 1504(%rax)
7117 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7118 ; SSE-NEXT: movaps %xmm1, 1488(%rax)
7119 ; SSE-NEXT: movaps %xmm4, 1440(%rax)
7120 ; SSE-NEXT: movaps %xmm8, 1424(%rax)
7121 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7122 ; SSE-NEXT: movaps %xmm1, 1392(%rax)
7123 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7124 ; SSE-NEXT: movaps %xmm1, 1376(%rax)
7125 ; SSE-NEXT: movaps %xmm12, 1328(%rax)
7126 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7127 ; SSE-NEXT: movaps %xmm0, 1312(%rax)
7128 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7129 ; SSE-NEXT: movaps %xmm1, 1280(%rax)
7130 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7131 ; SSE-NEXT: movaps %xmm1, 1264(%rax)
7132 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7133 ; SSE-NEXT: movaps %xmm0, 1216(%rax)
7134 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7135 ; SSE-NEXT: movaps %xmm0, 1200(%rax)
7136 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7137 ; SSE-NEXT: movaps %xmm1, 1168(%rax)
7138 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7139 ; SSE-NEXT: movaps %xmm1, 1152(%rax)
7140 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7141 ; SSE-NEXT: movaps %xmm0, 1104(%rax)
7142 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7143 ; SSE-NEXT: movaps %xmm0, 1088(%rax)
7144 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7145 ; SSE-NEXT: movaps %xmm0, 1056(%rax)
7146 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7147 ; SSE-NEXT: movaps %xmm0, 1040(%rax)
7148 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
7149 ; SSE-NEXT: movaps %xmm0, 992(%rax)
7150 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7151 ; SSE-NEXT: movaps %xmm0, 976(%rax)
7152 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7153 ; SSE-NEXT: movaps %xmm0, 944(%rax)
7154 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7155 ; SSE-NEXT: movaps %xmm0, 928(%rax)
7156 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7157 ; SSE-NEXT: movaps %xmm0, 880(%rax)
7158 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7159 ; SSE-NEXT: movaps %xmm0, 864(%rax)
7160 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7161 ; SSE-NEXT: movaps %xmm0, 832(%rax)
7162 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7163 ; SSE-NEXT: movaps %xmm0, 816(%rax)
7164 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7165 ; SSE-NEXT: movaps %xmm0, 768(%rax)
7166 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7167 ; SSE-NEXT: movaps %xmm0, 752(%rax)
7168 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7169 ; SSE-NEXT: movaps %xmm0, 720(%rax)
7170 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7171 ; SSE-NEXT: movaps %xmm0, 704(%rax)
7172 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7173 ; SSE-NEXT: movaps %xmm0, 656(%rax)
7174 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7175 ; SSE-NEXT: movaps %xmm0, 640(%rax)
7176 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7177 ; SSE-NEXT: movaps %xmm0, 608(%rax)
7178 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7179 ; SSE-NEXT: movaps %xmm0, 592(%rax)
7180 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7181 ; SSE-NEXT: movaps %xmm0, 544(%rax)
7182 ; SSE-NEXT: movaps %xmm2, 528(%rax)
7183 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7184 ; SSE-NEXT: movaps %xmm0, 496(%rax)
7185 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7186 ; SSE-NEXT: movaps %xmm0, 480(%rax)
7187 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7188 ; SSE-NEXT: movaps %xmm0, 432(%rax)
7189 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7190 ; SSE-NEXT: movaps %xmm0, 416(%rax)
7191 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7192 ; SSE-NEXT: movaps %xmm0, 384(%rax)
7193 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7194 ; SSE-NEXT: movaps %xmm0, 368(%rax)
7195 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7196 ; SSE-NEXT: movaps %xmm0, 320(%rax)
7197 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7198 ; SSE-NEXT: movaps %xmm0, 304(%rax)
7199 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7200 ; SSE-NEXT: movaps %xmm0, 272(%rax)
7201 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7202 ; SSE-NEXT: movaps %xmm0, 256(%rax)
7203 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7204 ; SSE-NEXT: movaps %xmm0, 208(%rax)
7205 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7206 ; SSE-NEXT: movaps %xmm0, 192(%rax)
7207 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7208 ; SSE-NEXT: movaps %xmm0, 160(%rax)
7209 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7210 ; SSE-NEXT: movaps %xmm0, 144(%rax)
7211 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7212 ; SSE-NEXT: movaps %xmm0, 96(%rax)
7213 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7214 ; SSE-NEXT: movaps %xmm0, 80(%rax)
7215 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7216 ; SSE-NEXT: movaps %xmm0, 48(%rax)
7217 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7218 ; SSE-NEXT: movaps %xmm0, 32(%rax)
7219 ; SSE-NEXT: addq $2760, %rsp # imm = 0xAC8
7220 ; SSE-NEXT: retq
7221 ;
7222 ; AVX1-ONLY-LABEL: store_i32_stride7_vf64:
7223 ; AVX1-ONLY: # %bb.0:
7224 ; AVX1-ONLY-NEXT: subq $3416, %rsp # imm = 0xD58
7225 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
7226 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm5
7227 ; AVX1-ONLY-NEXT: vmovaps 224(%rsi), %ymm2
7228 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7229 ; AVX1-ONLY-NEXT: vmovaps 224(%rdx), %ymm1
7230 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7231 ; AVX1-ONLY-NEXT: vmovaps 224(%rcx), %ymm0
7232 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7233 ; AVX1-ONLY-NEXT: vmovaps 224(%r8), %ymm3
7234 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7235 ; AVX1-ONLY-NEXT: vmovaps 224(%rax), %ymm4
7236 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7237 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
7238 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7239 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm2[1],ymm5[1],ymm2[3],ymm5[3]
7240 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,0],ymm5[4,5],ymm1[6,4]
7241 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
7242 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
7243 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
7244 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
7245 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
7246 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm4[2,3],ymm0[2,3]
7247 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
7248 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7249 ; AVX1-ONLY-NEXT: vmovaps (%rax), %xmm2
7250 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7251 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm3
7252 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7253 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm5
7254 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7255 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm1
7256 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm4
7257 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm4[0],xmm1[0]
7258 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
7259 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm6
7260 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7261 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7262 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm7
7263 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm8
7264 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
7265 ; AVX1-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7266 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7267 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
7268 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7269 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
7270 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
7271 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7272 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7273 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
7274 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7275 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm5[1,1]
7276 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7277 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
7278 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7279 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm4[1]
7280 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1],xmm1[0,2]
7281 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
7282 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm8[1],xmm7[1],zero
7283 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
7284 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
7285 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7286 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm1
7287 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7288 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm0
7289 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7290 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
7291 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7292 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm12
7293 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm1
7294 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7295 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,1],ymm1[1,1],ymm12[5,5],ymm1[5,5]
7296 ; AVX1-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7297 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
7298 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm2
7299 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7300 ; AVX1-ONLY-NEXT: vmovaps (%r9), %ymm1
7301 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7302 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7303 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
7304 ; AVX1-ONLY-NEXT: vmovaps (%rax), %ymm2
7305 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7306 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
7307 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7308 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
7309 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
7310 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7311 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1
7312 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm4
7313 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm4[0],xmm1[0]
7314 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
7315 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm7
7316 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7317 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7318 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %xmm3
7319 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %xmm2
7320 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
7321 ; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm8
7322 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7323 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm9
7324 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7325 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
7326 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7327 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %xmm3
7328 ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%rsp) # 16-byte Spill
7329 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %xmm6
7330 ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7331 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
7332 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
7333 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %xmm2
7334 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7335 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7336 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7337 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
7338 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7339 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm6[1,1]
7340 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7341 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
7342 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7343 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm4[1]
7344 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1],xmm1[0,2]
7345 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
7346 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm8[1],zero
7347 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
7348 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
7349 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7350 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
7351 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7352 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %ymm0
7353 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7354 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
7355 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7356 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %ymm1
7357 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7358 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %ymm8
7359 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm8[1,1],ymm1[5,5],ymm8[5,5]
7360 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7361 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
7362 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %ymm2
7363 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7364 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %ymm1
7365 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7366 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7367 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
7368 ; AVX1-ONLY-NEXT: vmovaps 32(%rax), %ymm2
7369 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7370 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
7371 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7372 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
7373 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
7374 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7375 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm1
7376 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %xmm6
7377 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm6[0],xmm1[0]
7378 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
7379 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm5
7380 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7381 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7382 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %xmm3
7383 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %xmm2
7384 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
7385 ; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm7
7386 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7387 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm9
7388 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7389 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
7390 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7391 ; AVX1-ONLY-NEXT: vmovaps 64(%r9), %xmm3
7392 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7393 ; AVX1-ONLY-NEXT: vmovaps 64(%r8), %xmm4
7394 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7395 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
7396 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
7397 ; AVX1-ONLY-NEXT: vmovaps 64(%rax), %xmm2
7398 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7399 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7400 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7401 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
7402 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7403 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
7404 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7405 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
7406 ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7407 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm6[1]
7408 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm6[1,1],xmm1[0,2]
7409 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
7410 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm7[1],zero
7411 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
7412 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
7413 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7414 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm1
7415 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7416 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %ymm0
7417 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7418 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
7419 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7420 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %ymm1
7421 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7422 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %ymm2
7423 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7424 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm2[1,1],ymm1[5,5],ymm2[5,5]
7425 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
7426 ; AVX1-ONLY-NEXT: vmovaps 64(%r8), %ymm2
7427 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7428 ; AVX1-ONLY-NEXT: vmovaps 64(%r9), %ymm15
7429 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm15[0],ymm2[0],ymm15[2],ymm2[2]
7430 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
7431 ; AVX1-ONLY-NEXT: vmovaps 64(%rax), %ymm2
7432 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7433 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
7434 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7435 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
7436 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
7437 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7438 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm1
7439 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %xmm5
7440 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm5[0],xmm1[0]
7441 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
7442 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm6
7443 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7444 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7445 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %xmm3
7446 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %xmm2
7447 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
7448 ; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm7
7449 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7450 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm9
7451 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7452 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
7453 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7454 ; AVX1-ONLY-NEXT: vmovaps 96(%r9), %xmm3
7455 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7456 ; AVX1-ONLY-NEXT: vmovaps 96(%r8), %xmm4
7457 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7458 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
7459 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
7460 ; AVX1-ONLY-NEXT: vmovaps 96(%rax), %xmm2
7461 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7462 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7463 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7464 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
7465 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7466 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
7467 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7468 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
7469 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7470 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm5[1]
7471 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,1],xmm1[0,2]
7472 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
7473 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm7[1],zero
7474 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
7475 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
7476 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7477 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm0
7478 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7479 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %ymm1
7480 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7481 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,1],ymm0[1,1],ymm1[5,5],ymm0[5,5]
7482 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7483 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %ymm2
7484 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7485 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %ymm1
7486 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7487 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
7488 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
7489 ; AVX1-ONLY-NEXT: vmovaps 96(%r8), %ymm2
7490 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7491 ; AVX1-ONLY-NEXT: vmovaps 96(%r9), %ymm10
7492 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm10[0],ymm2[0],ymm10[2],ymm2[2]
7493 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
7494 ; AVX1-ONLY-NEXT: vmovaps 96(%rax), %ymm2
7495 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7496 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
7497 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7498 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
7499 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
7500 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7501 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm1
7502 ; AVX1-ONLY-NEXT: vmovaps 128(%rsi), %xmm4
7503 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm4[0],xmm1[0]
7504 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
7505 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm5
7506 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7507 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7508 ; AVX1-ONLY-NEXT: vmovaps 128(%rcx), %xmm3
7509 ; AVX1-ONLY-NEXT: vmovaps 128(%rdx), %xmm2
7510 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
7511 ; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm6
7512 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7513 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm9
7514 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7515 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
7516 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7517 ; AVX1-ONLY-NEXT: vmovaps 128(%r9), %xmm3
7518 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7519 ; AVX1-ONLY-NEXT: vmovaps 128(%r8), %xmm7
7520 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7521 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
7522 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
7523 ; AVX1-ONLY-NEXT: vmovaps 128(%rax), %xmm2
7524 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7525 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7526 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7527 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
7528 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7529 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm7[1,1]
7530 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7531 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
7532 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7533 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm4[1]
7534 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1],xmm1[0,2]
7535 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
7536 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm6[1],zero
7537 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
7538 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
7539 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7540 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm1
7541 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7542 ; AVX1-ONLY-NEXT: vmovaps 128(%rsi), %ymm0
7543 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7544 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
7545 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7546 ; AVX1-ONLY-NEXT: vmovaps 128(%rdx), %ymm2
7547 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7548 ; AVX1-ONLY-NEXT: vmovaps 128(%rcx), %ymm1
7549 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7550 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
7551 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
7552 ; AVX1-ONLY-NEXT: vmovaps 128(%r8), %ymm2
7553 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7554 ; AVX1-ONLY-NEXT: vmovaps 128(%r9), %ymm1
7555 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7556 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7557 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,1],ymm1[6,4],ymm2[6,5]
7558 ; AVX1-ONLY-NEXT: vmovaps 128(%rax), %ymm2
7559 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7560 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
7561 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7562 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
7563 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
7564 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7565 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm1
7566 ; AVX1-ONLY-NEXT: vmovaps 160(%rsi), %xmm4
7567 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm4[0],xmm1[0]
7568 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
7569 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm5
7570 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7571 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7572 ; AVX1-ONLY-NEXT: vmovaps 160(%rcx), %xmm3
7573 ; AVX1-ONLY-NEXT: vmovaps 160(%rdx), %xmm2
7574 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
7575 ; AVX1-ONLY-NEXT: vmovaps %xmm3, %xmm6
7576 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7577 ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm9
7578 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7579 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
7580 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7581 ; AVX1-ONLY-NEXT: vmovaps 160(%r9), %xmm3
7582 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7583 ; AVX1-ONLY-NEXT: vmovaps 160(%r8), %xmm7
7584 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7585 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
7586 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
7587 ; AVX1-ONLY-NEXT: vmovaps 160(%rax), %xmm2
7588 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7589 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7590 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7591 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
7592 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7593 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm7[1,1]
7594 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7595 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
7596 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7597 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm4[1]
7598 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1],xmm1[0,2]
7599 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
7600 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm9[1],xmm6[1],zero
7601 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
7602 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
7603 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7604 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm1
7605 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7606 ; AVX1-ONLY-NEXT: vmovaps 160(%rsi), %ymm0
7607 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7608 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[1,1],ymm0[5,5],ymm1[5,5]
7609 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7610 ; AVX1-ONLY-NEXT: vmovaps 160(%rdx), %ymm6
7611 ; AVX1-ONLY-NEXT: vmovaps 160(%rcx), %ymm1
7612 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7613 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm6[1,1],ymm1[1,1],ymm6[5,5],ymm1[5,5]
7614 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7615 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
7616 ; AVX1-ONLY-NEXT: vmovaps 160(%r8), %ymm9
7617 ; AVX1-ONLY-NEXT: vmovaps 160(%r9), %ymm13
7618 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm9[0],ymm13[2],ymm9[2]
7619 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm9[2,1],ymm1[6,4],ymm9[6,5]
7620 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7621 ; AVX1-ONLY-NEXT: vmovaps 160(%rax), %ymm2
7622 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7623 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm1[2,3]
7624 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7625 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
7626 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
7627 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7628 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm1
7629 ; AVX1-ONLY-NEXT: vmovaps 192(%rsi), %xmm11
7630 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm11[0],xmm1[0]
7631 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
7632 ; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm14
7633 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7634 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7635 ; AVX1-ONLY-NEXT: vmovaps 192(%rcx), %xmm5
7636 ; AVX1-ONLY-NEXT: vmovaps 192(%rdx), %xmm4
7637 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
7638 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7639 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7640 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
7641 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7642 ; AVX1-ONLY-NEXT: vmovaps 192(%r9), %xmm3
7643 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7644 ; AVX1-ONLY-NEXT: vmovaps 192(%r8), %xmm7
7645 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7646 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
7647 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
7648 ; AVX1-ONLY-NEXT: vmovaps 192(%rax), %xmm2
7649 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7650 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7651 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
7652 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
7653 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7654 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm7[1,1]
7655 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7656 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
7657 ; AVX1-ONLY-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7658 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm14[1],xmm11[1]
7659 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm11[1,1],xmm1[0,2]
7660 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
7661 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm4[1],xmm5[1],zero
7662 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
7663 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
7664 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7665 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm4
7666 ; AVX1-ONLY-NEXT: vmovaps 192(%rsi), %ymm5
7667 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm5[1,1],ymm4[1,1],ymm5[5,5],ymm4[5,5]
7668 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7669 ; AVX1-ONLY-NEXT: vmovaps 192(%rdx), %ymm7
7670 ; AVX1-ONLY-NEXT: vmovaps 192(%rcx), %ymm14
7671 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm7[1,1],ymm14[1,1],ymm7[5,5],ymm14[5,5]
7672 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
7673 ; AVX1-ONLY-NEXT: vmovaps 192(%r8), %ymm11
7674 ; AVX1-ONLY-NEXT: vmovaps 192(%r9), %ymm3
7675 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm3[0],ymm11[0],ymm3[2],ymm11[2]
7676 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm11[2,1],ymm2[6,4],ymm11[6,5]
7677 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm2[2,3]
7678 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm2
7679 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[2],ymm0[3]
7680 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6],ymm0[7]
7681 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7682 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7683 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7684 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
7685 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7686 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm1 # 32-byte Folded Reload
7687 ; AVX1-ONLY-NEXT: # ymm1 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
7688 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
7689 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7690 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7691 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
7692 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
7693 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7694 ; AVX1-ONLY-NEXT: vmovaps 16(%rax), %xmm2
7695 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
7696 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
7697 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7698 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7699 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7700 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
7701 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7702 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7703 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm8[2],ymm1[3],ymm8[3],ymm1[6],ymm8[6],ymm1[7],ymm8[7]
7704 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
7705 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7706 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7707 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
7708 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
7709 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7710 ; AVX1-ONLY-NEXT: vmovaps 48(%rax), %xmm2
7711 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
7712 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
7713 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7714 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7715 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7716 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
7717 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7718 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
7719 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm1 # 32-byte Folded Reload
7720 ; AVX1-ONLY-NEXT: # ymm1 = ymm8[2],mem[2],ymm8[3],mem[3],ymm8[6],mem[6],ymm8[7],mem[7]
7721 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
7722 ; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7723 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7724 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm15[1],ymm1[3],ymm15[3]
7725 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm15[1,1],ymm1[0,2],ymm15[5,5],ymm1[4,6]
7726 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7727 ; AVX1-ONLY-NEXT: vmovaps 80(%rax), %xmm2
7728 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
7729 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
7730 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7731 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
7732 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
7733 ; AVX1-ONLY-NEXT: # ymm0 = ymm15[1],mem[1],ymm15[3],mem[3]
7734 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7735 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
7736 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm1 # 32-byte Folded Reload
7737 ; AVX1-ONLY-NEXT: # ymm1 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
7738 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
7739 ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7740 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7741 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm10[1],ymm1[3],ymm10[3]
7742 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[1,1],ymm1[0,2],ymm10[5,5],ymm1[4,6]
7743 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7744 ; AVX1-ONLY-NEXT: vmovaps 112(%rax), %xmm2
7745 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
7746 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
7747 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7748 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7749 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7750 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
7751 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7752 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7753 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7754 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
7755 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
7756 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7757 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7758 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
7759 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
7760 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7761 ; AVX1-ONLY-NEXT: vmovaps 144(%rax), %xmm2
7762 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
7763 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
7764 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7765 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7766 ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7767 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
7768 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7769 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7770 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm2[2],ymm6[3],ymm2[3],ymm6[6],ymm2[6],ymm6[7],ymm2[7]
7771 ; AVX1-ONLY-NEXT: vmovaps %ymm2, %ymm10
7772 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
7773 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7774 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm9[1],ymm13[1],ymm9[3],ymm13[3]
7775 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm13[1,1],ymm1[0,2],ymm13[5,5],ymm1[4,6]
7776 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7777 ; AVX1-ONLY-NEXT: vmovaps 176(%rax), %xmm2
7778 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
7779 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
7780 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7781 ; AVX1-ONLY-NEXT: vmovaps %ymm4, %ymm6
7782 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7783 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7784 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
7785 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
7786 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7787 ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7788 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm7[2],ymm14[2],ymm7[3],ymm14[3],ymm7[6],ymm14[6],ymm7[7],ymm14[7]
7789 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
7790 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm11[1],ymm3[1],ymm11[3],ymm3[3]
7791 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm3[1,1],ymm1[0,2],ymm3[5,5],ymm1[4,6]
7792 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7793 ; AVX1-ONLY-NEXT: vmovaps 208(%rax), %xmm2
7794 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
7795 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
7796 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7797 ; AVX1-ONLY-NEXT: vmovaps 224(%rsi), %xmm2
7798 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm11
7799 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm11[1],xmm2[1]
7800 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm2[1,1],xmm0[0,2]
7801 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm3
7802 ; AVX1-ONLY-NEXT: vmovaps 224(%rcx), %xmm4
7803 ; AVX1-ONLY-NEXT: vmovaps 224(%rdx), %xmm1
7804 ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[1],xmm4[1],zero
7805 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm0[1,2],ymm3[3,4,5,6,7]
7806 ; AVX1-ONLY-NEXT: vbroadcastss 228(%r8), %ymm13
7807 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm13[3],ymm3[4,5,6,7]
7808 ; AVX1-ONLY-NEXT: vbroadcastss 228(%r9), %ymm13
7809 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm13[4,5],ymm3[6,7]
7810 ; AVX1-ONLY-NEXT: vinsertf128 $1, 224(%rax), %ymm0, %ymm0
7811 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3,4],ymm0[5],ymm3[6,7]
7812 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7813 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm4[1],xmm1[1]
7814 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
7815 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7816 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm11[3,3],xmm2[3,3]
7817 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
7818 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6],ymm0[7]
7819 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7820 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
7821 ; AVX1-ONLY-NEXT: vbroadcastss 232(%r9), %xmm3
7822 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3],ymm0[4,5,6,7]
7823 ; AVX1-ONLY-NEXT: vbroadcastss 232(%rax), %ymm3
7824 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4],ymm0[5,6,7]
7825 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7826 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm14[3,3],ymm7[3,3],ymm14[7,7],ymm7[7,7]
7827 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7828 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm6[3,3],ymm5[3,3],ymm6[7,7],ymm5[7,7]
7829 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm3, %xmm3
7830 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7]
7831 ; AVX1-ONLY-NEXT: vbroadcastss 220(%r8), %ymm3
7832 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
7833 ; AVX1-ONLY-NEXT: vbroadcastss 220(%r9), %ymm3
7834 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
7835 ; AVX1-ONLY-NEXT: vbroadcastsd 216(%rax), %ymm3
7836 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6],ymm3[7]
7837 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7838 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
7839 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm11[0]
7840 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm11[2,1]
7841 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,0,1]
7842 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
7843 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
7844 ; AVX1-ONLY-NEXT: vinsertf128 $1, 224(%r8), %ymm1, %ymm1
7845 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
7846 ; AVX1-ONLY-NEXT: vbroadcastss 224(%r9), %ymm1
7847 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
7848 ; AVX1-ONLY-NEXT: vbroadcastss 224(%rax), %ymm1
7849 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
7850 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7851 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7852 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7853 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm4[3,3],ymm3[3,3],ymm4[7,7],ymm3[7,7]
7854 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7855 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7856 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
7857 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm6[3,3],ymm2[3,3],ymm6[7,7],ymm2[7,7]
7858 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7859 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7860 ; AVX1-ONLY-NEXT: vbroadcastss 252(%r8), %ymm1
7861 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
7862 ; AVX1-ONLY-NEXT: vbroadcastss 252(%r9), %ymm1
7863 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
7864 ; AVX1-ONLY-NEXT: vbroadcastsd 248(%rax), %ymm1
7865 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
7866 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7867 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
7868 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm4[3,1],ymm0[0,2],ymm4[7,5],ymm0[4,6]
7869 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm6[0],ymm2[0],ymm6[1],ymm2[1],ymm6[4],ymm2[4],ymm6[5],ymm2[5]
7870 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7871 ; AVX1-ONLY-NEXT: vbroadcastss 236(%r8), %ymm1
7872 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
7873 ; AVX1-ONLY-NEXT: vbroadcastss 236(%r9), %xmm1
7874 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7875 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7876 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
7877 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7878 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm3[1,1],ymm4[1,1],ymm3[5,5],ymm4[5,5]
7879 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm6[1,1],ymm2[5,5],ymm6[5,5]
7880 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7881 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7882 ; AVX1-ONLY-NEXT: vbroadcastsd 240(%r8), %ymm1
7883 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
7884 ; AVX1-ONLY-NEXT: vbroadcastss 240(%r9), %xmm1
7885 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
7886 ; AVX1-ONLY-NEXT: vbroadcastss 240(%rax), %ymm1
7887 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
7888 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7889 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7890 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7891 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
7892 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7893 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
7894 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
7895 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
7896 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7897 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7898 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7899 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
7900 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
7901 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
7902 ; AVX1-ONLY-NEXT: vbroadcastsd 8(%rax), %ymm2
7903 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7904 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
7905 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7906 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7907 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7908 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
7909 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7910 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
7911 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7912 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,3],ymm7[3,3],ymm1[7,7],ymm7[7,7]
7913 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7914 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7915 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7916 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7917 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7918 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7919 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
7920 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7921 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
7922 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
7923 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7924 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7925 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7926 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
7927 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7928 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
7929 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
7930 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
7931 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7932 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7933 ; AVX1-ONLY-NEXT: vpermilps $170, (%rsp), %xmm1 # 16-byte Folded Reload
7934 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
7935 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
7936 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
7937 ; AVX1-ONLY-NEXT: vbroadcastsd 40(%rax), %ymm2
7938 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7939 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
7940 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7941 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7942 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7943 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
7944 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7945 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7946 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7947 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7948 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7949 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7950 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7951 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7952 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7953 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7954 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
7955 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7956 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
7957 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
7958 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7959 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7960 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7961 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
7962 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7963 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
7964 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
7965 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
7966 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7967 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
7968 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7969 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
7970 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
7971 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
7972 ; AVX1-ONLY-NEXT: vbroadcastsd 72(%rax), %ymm2
7973 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7974 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
7975 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7976 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7977 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,3],ymm8[3,3],ymm0[7,7],ymm8[7,7]
7978 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
7979 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
7980 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7981 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,3],ymm6[3,3],ymm1[7,7],ymm6[7,7]
7982 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
7983 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
7984 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7985 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7986 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
7987 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7988 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
7989 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
7990 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
7991 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
7992 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7993 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7994 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7995 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
7996 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7997 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
7998 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
7999 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
8000 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
8001 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
8002 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8003 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
8004 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
8005 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
8006 ; AVX1-ONLY-NEXT: vbroadcastsd 104(%rax), %ymm2
8007 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
8008 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
8009 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8010 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8011 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,3],ymm12[3,3],ymm0[7,7],ymm12[7,7]
8012 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
8013 ; AVX1-ONLY-NEXT: vmovaps %ymm15, %ymm4
8014 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8015 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,3],ymm15[3,3],ymm1[7,7],ymm15[7,7]
8016 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
8017 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
8018 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8019 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8020 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
8021 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8022 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,3],ymm1[1,2],ymm2[6,7],ymm1[5,6]
8023 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
8024 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
8025 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
8026 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8027 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8028 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8029 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
8030 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8031 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
8032 ; AVX1-ONLY-NEXT: # xmm1 = xmm2[2],mem[2],xmm2[3],mem[3]
8033 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
8034 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
8035 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
8036 ; AVX1-ONLY-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8037 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,2,2,2]
8038 ; AVX1-ONLY-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
8039 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
8040 ; AVX1-ONLY-NEXT: vbroadcastsd 136(%rax), %ymm2
8041 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
8042 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
8043 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8044 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
8045 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
8046 ; AVX1-ONLY-NEXT: # ymm0 = ymm14[3,3],mem[3,3],ymm14[7,7],mem[7,7]
8047 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
8048 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
8049 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8050 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm12[3,3],ymm1[3,3],ymm12[7,7],ymm1[7,7]
8051 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm2
8052 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
8053 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8054 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
8055 ; AVX1-ONLY-NEXT: # ymm2 = ymm2[3,3],mem[3,3],ymm2[7,7],mem[7,7]
8056 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8057 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm3[2,3],ymm2[1,2],ymm3[6,7],ymm2[5,6]
8058 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
8059 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2,3,1,4,6,7,5]
8060 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5,6,7]
8061 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8062 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8063 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8064 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
8065 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
8066 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm2 # 16-byte Folded Reload
8067 ; AVX1-ONLY-NEXT: # xmm2 = xmm3[2],mem[2],xmm3[3],mem[3]
8068 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
8069 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
8070 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
8071 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
8072 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,2,2,2]
8073 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
8074 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm13[0,1,2],xmm2[3]
8075 ; AVX1-ONLY-NEXT: vbroadcastsd 168(%rax), %ymm3
8076 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
8077 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4],ymm0[5,6,7]
8078 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8079 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
8080 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm10[3,3],ymm11[3,3],ymm10[7,7],ymm11[7,7]
8081 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
8082 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
8083 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
8084 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,3],ymm10[3,3],ymm9[7,7],ymm10[7,7]
8085 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm2
8086 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
8087 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8088 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
8089 ; AVX1-ONLY-NEXT: # ymm2 = ymm2[3,3],mem[3,3],ymm2[7,7],mem[7,7]
8090 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8091 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm3[2,3],ymm2[1,2],ymm3[6,7],ymm2[5,6]
8092 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
8093 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2,3,1,4,6,7,5]
8094 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5,6,7]
8095 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8096 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8097 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8098 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[3,3],mem[3,3]
8099 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
8100 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm2 # 16-byte Folded Reload
8101 ; AVX1-ONLY-NEXT: # xmm2 = xmm3[2],mem[2],xmm3[3],mem[3]
8102 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
8103 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
8104 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6],ymm2[7]
8105 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8106 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm8[2,2,2,2]
8107 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
8108 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3]
8109 ; AVX1-ONLY-NEXT: vbroadcastsd 200(%rax), %ymm3
8110 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
8111 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4],ymm0[5,6,7]
8112 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8113 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8114 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm7[0],ymm0[1],ymm7[1],ymm0[4],ymm7[4],ymm0[5],ymm7[5]
8115 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
8116 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8117 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm7[0],ymm2[2],ymm7[2]
8118 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm7[3,1],ymm2[0,2],ymm7[7,5],ymm2[4,6]
8119 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
8120 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8121 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
8122 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[3,3],mem[3,3]
8123 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
8124 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0,1,2],mem[3]
8125 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4,5,6,7]
8126 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8127 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8128 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8129 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8130 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
8131 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8132 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm7[0],ymm2[2],ymm7[2]
8133 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm7[3,1],ymm2[0,2],ymm7[7,5],ymm2[4,6]
8134 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
8135 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8136 ; AVX1-ONLY-NEXT: vshufps $255, (%rsp), %xmm2, %xmm2 # 16-byte Folded Reload
8137 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[3,3],mem[3,3]
8138 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
8139 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0,1,2],mem[3]
8140 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm2[1,2,3],ymm0[4,5,6,7]
8141 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8142 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[4],ymm6[4],ymm0[5],ymm6[5]
8143 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
8144 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8145 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
8146 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm6[3,1],ymm2[0,2],ymm6[7,5],ymm2[4,6]
8147 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
8148 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8149 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
8150 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[3,3],mem[3,3]
8151 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
8152 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0,1,2],mem[3]
8153 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0],ymm2[1,2,3],ymm0[4,5,6,7]
8154 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8155 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[1],ymm4[1],ymm0[4],ymm4[4],ymm0[5],ymm4[5]
8156 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8157 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8158 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
8159 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,1],ymm2[0,2],ymm4[7,5],ymm2[4,6]
8160 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
8161 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
8162 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
8163 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[3,3],mem[3,3]
8164 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
8165 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0,1,2],mem[3]
8166 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4,5,6,7]
8167 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm2 = ymm12[0],ymm1[0],ymm12[1],ymm1[1],ymm12[4],ymm1[4],ymm12[5],ymm1[5]
8168 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8169 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm4[0],ymm14[0],ymm4[2],ymm14[2]
8170 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm14[3,1],ymm12[0,2],ymm14[7,5],ymm12[4,6]
8171 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm12[0,1,2,3],ymm2[4,5],ymm12[6,7]
8172 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8173 ; AVX1-ONLY-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm12 # 16-byte Folded Reload
8174 ; AVX1-ONLY-NEXT: # xmm12 = xmm1[3,3],mem[3,3]
8175 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
8176 ; AVX1-ONLY-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
8177 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm12[1,2,3],ymm2[4,5,6,7]
8178 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm12 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[4],ymm10[4],ymm9[5],ymm10[5]
8179 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8180 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm11[0],ymm1[0],ymm11[2],ymm1[2]
8181 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm1[3,1],ymm14[0,2],ymm1[7,5],ymm14[4,6]
8182 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm12[4,5],ymm10[6,7]
8183 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm13[3,3],xmm15[3,3]
8184 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
8185 ; AVX1-ONLY-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
8186 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm12[1,2,3],ymm10[4,5,6,7]
8187 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8188 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
8189 ; AVX1-ONLY-NEXT: # ymm12 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
8190 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8191 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8192 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
8193 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm1[3,1],ymm14[0,2],ymm1[7,5],ymm14[4,6]
8194 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm12[4,5],ymm9[6,7]
8195 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm5[3,3],xmm8[3,3]
8196 ; AVX1-ONLY-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
8197 ; AVX1-ONLY-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
8198 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm12[1,2,3],ymm9[4,5,6,7]
8199 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
8200 ; AVX1-ONLY-NEXT: vmovaps %ymm9, 1440(%rax)
8201 ; AVX1-ONLY-NEXT: vmovaps %ymm10, 1216(%rax)
8202 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 992(%rax)
8203 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 768(%rax)
8204 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 544(%rax)
8205 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 320(%rax)
8206 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8207 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax)
8208 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8209 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1504(%rax)
8210 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8211 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1408(%rax)
8212 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8213 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1312(%rax)
8214 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8215 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1280(%rax)
8216 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8217 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1184(%rax)
8218 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8219 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1088(%rax)
8220 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8221 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1056(%rax)
8222 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8223 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 960(%rax)
8224 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8225 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 864(%rax)
8226 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8227 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 832(%rax)
8228 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8229 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 736(%rax)
8230 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8231 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 640(%rax)
8232 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8233 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 608(%rax)
8234 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8235 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 512(%rax)
8236 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8237 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rax)
8238 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8239 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rax)
8240 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8241 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rax)
8242 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8243 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rax)
8244 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8245 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rax)
8246 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8247 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rax)
8248 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8249 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1696(%rax)
8250 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8251 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1664(%rax)
8252 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8253 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1472(%rax)
8254 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8255 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1376(%rax)
8256 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8257 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1344(%rax)
8258 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8259 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1248(%rax)
8260 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8261 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1152(%rax)
8262 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8263 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1120(%rax)
8264 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8265 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1024(%rax)
8266 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8267 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 928(%rax)
8268 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8269 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 896(%rax)
8270 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8271 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 800(%rax)
8272 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8273 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 704(%rax)
8274 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8275 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 672(%rax)
8276 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8277 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 576(%rax)
8278 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8279 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rax)
8280 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8281 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rax)
8282 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8283 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rax)
8284 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8285 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rax)
8286 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8287 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
8288 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8289 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
8290 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8291 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
8292 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8293 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
8294 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8295 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1760(%rax)
8296 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8297 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1728(%rax)
8298 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8299 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1568(%rax)
8300 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8301 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1536(%rax)
8302 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8303 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1632(%rax)
8304 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8305 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1600(%rax)
8306 ; AVX1-ONLY-NEXT: addq $3416, %rsp # imm = 0xD58
8307 ; AVX1-ONLY-NEXT: vzeroupper
8308 ; AVX1-ONLY-NEXT: retq
8310 ; AVX2-SLOW-LABEL: store_i32_stride7_vf64:
8311 ; AVX2-SLOW: # %bb.0:
8312 ; AVX2-SLOW-NEXT: subq $3000, %rsp # imm = 0xBB8
8313 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
8314 ; AVX2-SLOW-NEXT: vmovaps (%rax), %xmm0
8315 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8316 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
8317 ; AVX2-SLOW-NEXT: vmovaps (%r8), %xmm2
8318 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8319 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %xmm4
8320 ; AVX2-SLOW-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8321 ; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm1
8322 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8323 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %xmm7
8324 ; AVX2-SLOW-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8325 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
8326 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
8327 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
8328 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
8329 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %xmm9
8330 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %xmm3
8331 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8332 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %xmm8
8333 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm8[1],xmm9[1],zero
8334 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm6
8335 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %xmm10
8336 ; AVX2-SLOW-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8337 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %xmm5
8338 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %xmm11
8339 ; AVX2-SLOW-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8340 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm5[1,1,2,2]
8341 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3]
8342 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
8343 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
8344 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8345 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8346 ; AVX2-SLOW-NEXT: vmovaps 32(%rax), %xmm0
8347 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8348 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
8349 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm7[1,1,1,1]
8350 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
8351 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm1
8352 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
8353 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm11[1,1,2,2]
8354 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm10[2],xmm1[3]
8355 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8356 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %xmm7
8357 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm7[1],xmm3[1],zero
8358 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
8359 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8360 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8361 ; AVX2-SLOW-NEXT: vmovaps 64(%r8), %xmm1
8362 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8363 ; AVX2-SLOW-NEXT: vmovaps 64(%r9), %xmm0
8364 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8365 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
8366 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
8367 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
8368 ; AVX2-SLOW-NEXT: vmovaps 64(%rax), %xmm1
8369 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8370 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
8371 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
8372 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %xmm2
8373 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8374 ; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %xmm1
8375 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8376 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
8377 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
8378 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8379 ; AVX2-SLOW-NEXT: vmovaps 64(%rcx), %xmm2
8380 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8381 ; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %xmm11
8382 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm11[1],xmm2[1],zero
8383 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
8384 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8385 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8386 ; AVX2-SLOW-NEXT: vmovaps 96(%r8), %xmm1
8387 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8388 ; AVX2-SLOW-NEXT: vmovaps 96(%r9), %xmm0
8389 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8390 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
8391 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
8392 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
8393 ; AVX2-SLOW-NEXT: vmovaps 96(%rax), %xmm1
8394 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8395 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
8396 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
8397 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %xmm2
8398 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8399 ; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %xmm1
8400 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8401 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
8402 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
8403 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8404 ; AVX2-SLOW-NEXT: vmovaps 96(%rcx), %xmm3
8405 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8406 ; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %xmm2
8407 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8408 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
8409 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
8410 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8411 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8412 ; AVX2-SLOW-NEXT: vmovaps 128(%r8), %xmm1
8413 ; AVX2-SLOW-NEXT: vmovaps %xmm1, (%rsp) # 16-byte Spill
8414 ; AVX2-SLOW-NEXT: vmovaps 128(%r9), %xmm0
8415 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8416 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
8417 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
8418 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
8419 ; AVX2-SLOW-NEXT: vmovaps 128(%rax), %xmm1
8420 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8421 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
8422 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
8423 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %xmm2
8424 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8425 ; AVX2-SLOW-NEXT: vmovaps 128(%rsi), %xmm1
8426 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8427 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
8428 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
8429 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8430 ; AVX2-SLOW-NEXT: vmovaps 128(%rcx), %xmm3
8431 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8432 ; AVX2-SLOW-NEXT: vmovaps 128(%rdx), %xmm2
8433 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8434 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
8435 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
8436 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8437 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8438 ; AVX2-SLOW-NEXT: vmovaps 160(%r8), %xmm1
8439 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8440 ; AVX2-SLOW-NEXT: vmovaps 160(%r9), %xmm0
8441 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8442 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
8443 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
8444 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
8445 ; AVX2-SLOW-NEXT: vmovaps 160(%rax), %xmm1
8446 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8447 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
8448 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
8449 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %xmm2
8450 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8451 ; AVX2-SLOW-NEXT: vmovaps 160(%rsi), %xmm1
8452 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8453 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
8454 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
8455 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8456 ; AVX2-SLOW-NEXT: vmovaps 160(%rcx), %xmm3
8457 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8458 ; AVX2-SLOW-NEXT: vmovaps 160(%rdx), %xmm2
8459 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8460 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
8461 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
8462 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8463 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8464 ; AVX2-SLOW-NEXT: vmovaps 192(%r9), %xmm0
8465 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8466 ; AVX2-SLOW-NEXT: vmovaps 192(%r8), %xmm1
8467 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8468 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
8469 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
8470 ; AVX2-SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
8471 ; AVX2-SLOW-NEXT: vmovaps 192(%rax), %xmm1
8472 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8473 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
8474 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
8475 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %xmm2
8476 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8477 ; AVX2-SLOW-NEXT: vmovaps 192(%rsi), %xmm1
8478 ; AVX2-SLOW-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8479 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
8480 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
8481 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8482 ; AVX2-SLOW-NEXT: vmovaps 192(%rcx), %xmm3
8483 ; AVX2-SLOW-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8484 ; AVX2-SLOW-NEXT: vmovaps 192(%rdx), %xmm2
8485 ; AVX2-SLOW-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8486 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
8487 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
8488 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
8489 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8490 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
8491 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8492 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm1
8493 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8494 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
8495 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
8496 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm2
8497 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8498 ; AVX2-SLOW-NEXT: vmovaps (%rcx), %ymm1
8499 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8500 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
8501 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
8502 ; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm2
8503 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8504 ; AVX2-SLOW-NEXT: vmovaps (%r9), %ymm1
8505 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8506 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
8507 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
8508 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8509 ; AVX2-SLOW-NEXT: vmovaps 16(%rax), %xmm2
8510 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
8511 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8512 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8513 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm1
8514 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8515 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm0
8516 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8517 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
8518 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
8519 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm1
8520 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8521 ; AVX2-SLOW-NEXT: vmovaps 32(%rcx), %ymm2
8522 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8523 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
8524 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
8525 ; AVX2-SLOW-NEXT: vmovaps 32(%r8), %ymm2
8526 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8527 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %ymm1
8528 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8529 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
8530 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
8531 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8532 ; AVX2-SLOW-NEXT: vmovaps 48(%rax), %xmm2
8533 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
8534 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8535 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8536 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm1
8537 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8538 ; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %ymm0
8539 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8540 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
8541 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
8542 ; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %ymm1
8543 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8544 ; AVX2-SLOW-NEXT: vmovaps 64(%rcx), %ymm2
8545 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8546 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
8547 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
8548 ; AVX2-SLOW-NEXT: vmovaps 64(%r8), %ymm2
8549 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8550 ; AVX2-SLOW-NEXT: vmovaps 64(%r9), %ymm1
8551 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8552 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
8553 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
8554 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8555 ; AVX2-SLOW-NEXT: vmovaps 80(%rax), %xmm2
8556 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
8557 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8558 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8559 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm1
8560 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8561 ; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %ymm0
8562 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8563 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
8564 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
8565 ; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %ymm1
8566 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8567 ; AVX2-SLOW-NEXT: vmovaps 96(%rcx), %ymm2
8568 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8569 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
8570 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
8571 ; AVX2-SLOW-NEXT: vmovaps 96(%r8), %ymm2
8572 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8573 ; AVX2-SLOW-NEXT: vmovaps 96(%r9), %ymm1
8574 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8575 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
8576 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
8577 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8578 ; AVX2-SLOW-NEXT: vmovaps 112(%rax), %xmm2
8579 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
8580 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8581 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8582 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm1
8583 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8584 ; AVX2-SLOW-NEXT: vmovaps 128(%rsi), %ymm0
8585 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8586 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
8587 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
8588 ; AVX2-SLOW-NEXT: vmovaps 128(%rdx), %ymm1
8589 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8590 ; AVX2-SLOW-NEXT: vmovaps 128(%rcx), %ymm2
8591 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8592 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
8593 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
8594 ; AVX2-SLOW-NEXT: vmovaps 128(%r8), %ymm2
8595 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8596 ; AVX2-SLOW-NEXT: vmovaps 128(%r9), %ymm1
8597 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8598 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
8599 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
8600 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8601 ; AVX2-SLOW-NEXT: vmovaps 144(%rax), %xmm2
8602 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
8603 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8604 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8605 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm1
8606 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8607 ; AVX2-SLOW-NEXT: vmovaps 160(%rsi), %ymm0
8608 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8609 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
8610 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
8611 ; AVX2-SLOW-NEXT: vmovaps 160(%rdx), %ymm1
8612 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8613 ; AVX2-SLOW-NEXT: vmovaps 160(%rcx), %ymm2
8614 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8615 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
8616 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
8617 ; AVX2-SLOW-NEXT: vmovaps 160(%r8), %ymm2
8618 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8619 ; AVX2-SLOW-NEXT: vmovaps 160(%r9), %ymm1
8620 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8621 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
8622 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
8623 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8624 ; AVX2-SLOW-NEXT: vmovaps 176(%rax), %xmm2
8625 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
8626 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8627 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8628 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %ymm0
8629 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8630 ; AVX2-SLOW-NEXT: vmovaps 192(%rsi), %ymm1
8631 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8632 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
8633 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
8634 ; AVX2-SLOW-NEXT: vmovaps 192(%rdx), %ymm1
8635 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8636 ; AVX2-SLOW-NEXT: vmovaps 192(%rcx), %ymm2
8637 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8638 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
8639 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
8640 ; AVX2-SLOW-NEXT: vmovaps 192(%r8), %ymm2
8641 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8642 ; AVX2-SLOW-NEXT: vmovaps 192(%r9), %ymm1
8643 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8644 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
8645 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
8646 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8647 ; AVX2-SLOW-NEXT: vmovaps 208(%rax), %xmm2
8648 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
8649 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8650 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8651 ; AVX2-SLOW-NEXT: vmovaps 224(%rcx), %xmm0
8652 ; AVX2-SLOW-NEXT: vbroadcastss %xmm0, %xmm2
8653 ; AVX2-SLOW-NEXT: vmovaps 224(%rdx), %xmm1
8654 ; AVX2-SLOW-NEXT: vbroadcastss %xmm1, %xmm3
8655 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
8656 ; AVX2-SLOW-NEXT: vmovaps 224(%rsi), %xmm4
8657 ; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %xmm2
8658 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm15 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
8659 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,2,2]
8660 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
8661 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm15[0,1],ymm3[2,3],ymm15[4,5,6,7]
8662 ; AVX2-SLOW-NEXT: vbroadcastsd 224(%r8), %ymm15
8663 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm15 = ymm3[0,1,2,3],ymm15[4,5],ymm3[6,7]
8664 ; AVX2-SLOW-NEXT: vmovaps 224(%r9), %xmm3
8665 ; AVX2-SLOW-NEXT: vbroadcastss %xmm3, %ymm14
8666 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4],ymm14[5],ymm15[6,7]
8667 ; AVX2-SLOW-NEXT: vbroadcastss 224(%rax), %ymm15
8668 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm14[0,1,2,3,4,5],ymm15[6],ymm14[7]
8669 ; AVX2-SLOW-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8670 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm14 = xmm4[1,1,2,2]
8671 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm14 = xmm14[0,1],xmm2[2],xmm14[3]
8672 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,2,1]
8673 ; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm15 = zero,xmm1[1],xmm0[1],zero
8674 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1,2],ymm14[3,4,5,6,7]
8675 ; AVX2-SLOW-NEXT: vbroadcastss 228(%r8), %ymm13
8676 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3],ymm14[4,5,6,7]
8677 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm14 = xmm3[1,1,1,1]
8678 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
8679 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5],ymm13[6,7]
8680 ; AVX2-SLOW-NEXT: vinsertf128 $1, 224(%rax), %ymm15, %ymm14
8681 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
8682 ; AVX2-SLOW-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8683 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3],xmm4[3,3]
8684 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
8685 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1
8686 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,2,2]
8687 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
8688 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
8689 ; AVX2-SLOW-NEXT: vmovaps 224(%r8), %ymm1
8690 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8691 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
8692 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm3[2,2,2,2]
8693 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
8694 ; AVX2-SLOW-NEXT: vbroadcastss 232(%rax), %ymm1
8695 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7]
8696 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8697 ; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %ymm12
8698 ; AVX2-SLOW-NEXT: vmovaps 224(%rsi), %ymm10
8699 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm10[1,1,1,1,5,5,5,5]
8700 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm12[1],ymm0[2,3,4],ymm12[5],ymm0[6,7]
8701 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm0[2,2,2,2]
8702 ; AVX2-SLOW-NEXT: vmovaps 224(%rdx), %ymm0
8703 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8704 ; AVX2-SLOW-NEXT: vmovaps 224(%rcx), %ymm2
8705 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm14 = ymm0[1,1],ymm2[1,1],ymm0[5,5],ymm2[5,5]
8706 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4],ymm14[5,6],ymm13[7]
8707 ; AVX2-SLOW-NEXT: vbroadcastsd 240(%r8), %ymm14
8708 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1,2,3,4,5,6],ymm14[7]
8709 ; AVX2-SLOW-NEXT: vbroadcastss 240(%r9), %xmm14
8710 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4,5,6,7]
8711 ; AVX2-SLOW-NEXT: vbroadcastss 240(%rax), %ymm14
8712 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm14[2],ymm13[3,4,5,6,7]
8713 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8714 ; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm13
8715 ; AVX2-SLOW-NEXT: vbroadcastss %xmm8, %xmm14
8716 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
8717 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm14 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8718 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm14 = xmm14[0,1,2,2]
8719 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,2,1]
8720 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm14[0,1],ymm13[2,3],ymm14[4,5,6,7]
8721 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8722 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8723 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm14 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
8724 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
8725 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 16-byte Folded Reload
8726 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm14[0],ymm15[0],ymm14[2],ymm15[2]
8727 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6],ymm1[7]
8728 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8729 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm6[3,3],xmm5[3,3]
8730 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm6 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
8731 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
8732 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8733 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8734 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
8735 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm4[2,2,2,2]
8736 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3]
8737 ; AVX2-SLOW-NEXT: vbroadcastsd 8(%rax), %ymm8
8738 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
8739 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
8740 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8741 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8742 ; AVX2-SLOW-NEXT: vbroadcastss %xmm1, %xmm5
8743 ; AVX2-SLOW-NEXT: vbroadcastss %xmm7, %xmm6
8744 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8745 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8746 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
8747 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm4[0],xmm14[0],xmm4[1],xmm14[1]
8748 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8749 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8750 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
8751 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
8752 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
8753 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm9[0],xmm13[0],xmm9[1],xmm13[1]
8754 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
8755 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
8756 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
8757 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
8758 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8759 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm14[3,3]
8760 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm6 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
8761 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
8762 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8763 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8764 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
8765 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm13[2,2,2,2]
8766 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1,2],xmm6[3]
8767 ; AVX2-SLOW-NEXT: vbroadcastsd 40(%rax), %ymm7
8768 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
8769 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
8770 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8771 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
8772 ; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm5
8773 ; AVX2-SLOW-NEXT: vbroadcastss %xmm11, %xmm6
8774 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8775 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8776 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
8777 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
8778 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8779 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8780 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
8781 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8782 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8783 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8784 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
8785 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
8786 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
8787 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
8788 ; AVX2-SLOW-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8789 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm9[3,3],xmm8[3,3]
8790 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm6 = xmm11[2],xmm13[2],xmm11[3],xmm13[3]
8791 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
8792 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8793 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8794 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
8795 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm0[2,2,2,2]
8796 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm1[0,1,2],xmm6[3]
8797 ; AVX2-SLOW-NEXT: vbroadcastsd 72(%rax), %ymm7
8798 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
8799 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
8800 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8801 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
8802 ; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm5
8803 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
8804 ; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm6
8805 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8806 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8807 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8808 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
8809 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8810 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8811 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
8812 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
8813 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8814 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm11[0],xmm8[1],xmm11[1]
8815 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
8816 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
8817 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
8818 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
8819 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8820 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm1[3,3]
8821 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm6 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
8822 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
8823 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8824 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8825 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
8826 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm11[2,2,2,2]
8827 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3]
8828 ; AVX2-SLOW-NEXT: vbroadcastsd 104(%rax), %ymm7
8829 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
8830 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
8831 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8832 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
8833 ; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm5
8834 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
8835 ; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm6
8836 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8837 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8838 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8839 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
8840 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8841 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8842 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
8843 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
8844 ; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
8845 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
8846 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
8847 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
8848 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
8849 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
8850 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8851 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm8[3,3],xmm4[3,3]
8852 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm6 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
8853 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
8854 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8855 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8856 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
8857 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm11[2,2,2,2]
8858 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3]
8859 ; AVX2-SLOW-NEXT: vbroadcastsd 136(%rax), %ymm7
8860 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
8861 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
8862 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8863 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
8864 ; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm5
8865 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
8866 ; AVX2-SLOW-NEXT: vbroadcastss %xmm13, %xmm6
8867 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8868 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8869 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8870 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
8871 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8872 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8873 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
8874 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
8875 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8876 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
8877 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
8878 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
8879 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
8880 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
8881 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8882 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm8[3,3],xmm4[3,3]
8883 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm6 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
8884 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
8885 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8886 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8887 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
8888 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm11[2,2,2,2]
8889 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3]
8890 ; AVX2-SLOW-NEXT: vbroadcastsd 168(%rax), %ymm7
8891 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
8892 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
8893 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8894 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
8895 ; AVX2-SLOW-NEXT: vbroadcastss %xmm9, %xmm5
8896 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
8897 ; AVX2-SLOW-NEXT: vbroadcastss %xmm11, %xmm6
8898 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
8899 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8900 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8901 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
8902 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8903 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8904 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
8905 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8906 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
8907 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
8908 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
8909 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
8910 ; AVX2-SLOW-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
8911 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
8912 ; AVX2-SLOW-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8913 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm8[3,3],xmm4[3,3]
8914 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} xmm6 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
8915 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
8916 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
8917 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
8918 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
8919 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm6 = xmm0[2,2,2,2]
8920 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm6 = xmm1[0,1,2],xmm6[3]
8921 ; AVX2-SLOW-NEXT: vbroadcastsd 200(%rax), %ymm7
8922 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
8923 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
8924 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8925 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8926 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
8927 ; AVX2-SLOW-NEXT: # ymm5 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
8928 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[3,3,3,3]
8929 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
8930 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
8931 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm6 = ymm13[2],ymm11[2],ymm13[3],ymm11[3],ymm13[6],ymm11[6],ymm13[7],ymm11[7]
8932 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[3,3,3,3]
8933 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7]
8934 ; AVX2-SLOW-NEXT: vbroadcastss 220(%r8), %ymm6
8935 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5],ymm5[6,7]
8936 ; AVX2-SLOW-NEXT: vbroadcastss 220(%r9), %ymm6
8937 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
8938 ; AVX2-SLOW-NEXT: vbroadcastsd 216(%rax), %ymm6
8939 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0],ymm5[1,2,3,4,5,6],ymm6[7]
8940 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8941 ; AVX2-SLOW-NEXT: vbroadcastss 240(%rdx), %ymm5
8942 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm2[3,1,2,0,7,5,6,4]
8943 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6],ymm6[7]
8944 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm6 = ymm12[0],ymm10[0],ymm12[1],ymm10[1],ymm12[4],ymm10[4],ymm12[5],ymm10[5]
8945 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
8946 ; AVX2-SLOW-NEXT: vbroadcastss 236(%r8), %ymm6
8947 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4,5,6,7]
8948 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,2,3,3]
8949 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3],ymm5[4,5,6,7]
8950 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm5 = ymm12[2],ymm10[2],ymm12[3],ymm10[3],ymm12[6],ymm10[6],ymm12[7],ymm10[7]
8951 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,2]
8952 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8953 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm6 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
8954 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
8955 ; AVX2-SLOW-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
8956 ; AVX2-SLOW-NEXT: # ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
8957 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm7 = mem[1,2,2,3,5,6,6,7]
8958 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,2,2,2]
8959 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0],ymm5[1,2,3,4,5,6],ymm7[7]
8960 ; AVX2-SLOW-NEXT: vmovaps 224(%rax), %ymm7
8961 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm7[3],ymm3[4,5,6,7]
8962 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8963 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm7[2,3],ymm6[2,3]
8964 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0],ymm3[1],ymm5[2,3,4],ymm3[5],ymm5[6,7]
8965 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8966 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
8967 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[6],ymm12[6],ymm10[7],ymm12[7]
8968 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
8969 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
8970 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
8971 ; AVX2-SLOW-NEXT: vbroadcastss 252(%r8), %ymm1
8972 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
8973 ; AVX2-SLOW-NEXT: vbroadcastss 252(%r9), %ymm1
8974 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
8975 ; AVX2-SLOW-NEXT: vbroadcastsd 248(%rax), %ymm1
8976 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
8977 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8978 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
8979 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm6[1,1,1,1,5,5,5,5]
8980 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
8981 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4],ymm5[5],ymm0[6,7]
8982 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
8983 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
8984 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
8985 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,1],ymm3[1,1],ymm8[5,5],ymm3[5,5]
8986 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
8987 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
8988 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm12[0,0,0,0,4,4,4,4]
8989 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
8990 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm9[0,1,0,1,4,5,4,5]
8991 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
8992 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8993 ; AVX2-SLOW-NEXT: vbroadcastsd 16(%rax), %ymm2
8994 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
8995 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
8996 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8997 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
8998 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm14[1,1,1,1,5,5,5,5]
8999 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
9000 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm10[1],ymm0[2,3,4],ymm10[5],ymm0[6,7]
9001 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9002 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
9003 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9004 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm7[1,1],ymm1[5,5],ymm7[5,5]
9005 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9006 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9007 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9008 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9009 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9010 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9011 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9012 ; AVX2-SLOW-NEXT: vbroadcastsd 48(%rax), %ymm2
9013 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9014 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9015 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9016 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9017 ; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9018 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9019 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
9020 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9021 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
9022 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9023 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm15[1,1],ymm1[5,5],ymm15[5,5]
9024 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9025 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9026 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9027 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9028 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9029 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9030 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9031 ; AVX2-SLOW-NEXT: vbroadcastsd 80(%rax), %ymm2
9032 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9033 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9034 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9035 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9036 ; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9037 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9038 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
9039 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9040 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9041 ; AVX2-SLOW-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9042 ; AVX2-SLOW-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
9043 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9044 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9045 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9046 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9047 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9048 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9049 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9050 ; AVX2-SLOW-NEXT: vbroadcastsd 112(%rax), %ymm2
9051 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9052 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9053 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9054 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9055 ; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9056 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9057 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
9058 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9059 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9060 ; AVX2-SLOW-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9061 ; AVX2-SLOW-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
9062 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9063 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9064 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9065 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9066 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9067 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9068 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9069 ; AVX2-SLOW-NEXT: vbroadcastsd 144(%rax), %ymm2
9070 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9071 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9072 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9073 ; AVX2-SLOW-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9074 ; AVX2-SLOW-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9075 ; AVX2-SLOW-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9076 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
9077 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9078 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9079 ; AVX2-SLOW-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9080 ; AVX2-SLOW-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
9081 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9082 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9083 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9084 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9085 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9086 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9087 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9088 ; AVX2-SLOW-NEXT: vbroadcastsd 176(%rax), %ymm2
9089 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9090 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9091 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9092 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9093 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm4[1,1],ymm0[5,5],ymm4[5,5]
9094 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm13[1,1,1,1,5,5,5,5]
9095 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4],ymm11[5],ymm1[6,7]
9096 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
9097 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9098 ; AVX2-SLOW-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9099 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9100 ; AVX2-SLOW-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9101 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9102 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9103 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9104 ; AVX2-SLOW-NEXT: vbroadcastsd 208(%rax), %ymm2
9105 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9106 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9107 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9108 ; AVX2-SLOW-NEXT: vbroadcastss 16(%rdx), %ymm0
9109 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm3[3,1,2,0,7,5,6,4]
9110 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
9111 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm1 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[4],ymm6[4],ymm5[5],ymm6[5]
9112 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
9113 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9114 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
9115 ; AVX2-SLOW-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
9116 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
9117 ; AVX2-SLOW-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
9118 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
9119 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9120 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm3[2],ymm8[2],ymm3[3],ymm8[3],ymm3[6],ymm8[6],ymm3[7],ymm8[7]
9121 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[6],ymm5[6],ymm6[7],ymm5[7]
9122 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
9123 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
9124 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9125 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm9[3,3],ymm12[3,3],ymm9[7,7],ymm12[7,7]
9126 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
9127 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
9128 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9129 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
9130 ; AVX2-SLOW-NEXT: vbroadcastss 48(%rdx), %ymm0
9131 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm7[3,1,2,0,7,5,6,4]
9132 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
9133 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm1 = ymm10[0],ymm14[0],ymm10[1],ymm14[1],ymm10[4],ymm14[4],ymm10[5],ymm14[5]
9134 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
9135 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9136 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
9137 ; AVX2-SLOW-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
9138 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
9139 ; AVX2-SLOW-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
9140 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
9141 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm0 # 32-byte Folded Reload
9142 ; AVX2-SLOW-NEXT: # ymm0 = ymm7[2],mem[2],ymm7[3],mem[3],ymm7[6],mem[6],ymm7[7],mem[7]
9143 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm14[2],ymm10[2],ymm14[3],ymm10[3],ymm14[6],ymm10[6],ymm14[7],ymm10[7]
9144 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
9145 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
9146 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9147 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9148 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9149 ; AVX2-SLOW-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
9150 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
9151 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
9152 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9153 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
9154 ; AVX2-SLOW-NEXT: vbroadcastss 80(%rdx), %ymm0
9155 ; AVX2-SLOW-NEXT: vmovaps %ymm15, %ymm13
9156 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm15[3,1,2,0,7,5,6,4]
9157 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
9158 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9159 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
9160 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
9161 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
9162 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9163 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
9164 ; AVX2-SLOW-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
9165 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
9166 ; AVX2-SLOW-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
9167 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
9168 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
9169 ; AVX2-SLOW-NEXT: # ymm0 = ymm13[2],mem[2],ymm13[3],mem[3],ymm13[6],mem[6],ymm13[7],mem[7]
9170 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[6],ymm2[6],ymm4[7],ymm2[7]
9171 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
9172 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
9173 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9174 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9175 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9176 ; AVX2-SLOW-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
9177 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
9178 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
9179 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9180 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
9181 ; AVX2-SLOW-NEXT: vbroadcastss 112(%rdx), %ymm0
9182 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
9183 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm4[3,1,2,0,7,5,6,4]
9184 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
9185 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9186 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
9187 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
9188 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
9189 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9190 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
9191 ; AVX2-SLOW-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
9192 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
9193 ; AVX2-SLOW-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
9194 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
9195 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
9196 ; AVX2-SLOW-NEXT: # ymm0 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
9197 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
9198 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
9199 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
9200 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9201 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9202 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9203 ; AVX2-SLOW-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
9204 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,2,3,6,7,6,7]
9205 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0],ymm1[1,2],ymm8[3,4],ymm1[5,6],ymm8[7]
9206 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9207 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
9208 ; AVX2-SLOW-NEXT: vbroadcastss 144(%rdx), %ymm1
9209 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9210 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm0[3,1,2,0,7,5,6,4]
9211 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm1[6],ymm8[7]
9212 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9213 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9214 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm8 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
9215 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5],ymm3[6,7]
9216 ; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm5 # 16-byte Reload
9217 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm8 # 16-byte Folded Reload
9218 ; AVX2-SLOW-NEXT: # xmm8 = xmm5[3,3],mem[3,3]
9219 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
9220 ; AVX2-SLOW-NEXT: # xmm8 = xmm8[0,1,2],mem[3]
9221 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm8[1,2,3],ymm3[4,5,6,7]
9222 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
9223 ; AVX2-SLOW-NEXT: # ymm8 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
9224 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
9225 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[3,3,3,3]
9226 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
9227 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3,4,5,6,7]
9228 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9229 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
9230 ; AVX2-SLOW-NEXT: # ymm8 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
9231 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm7 = mem[2,3,2,3,6,7,6,7]
9232 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1,2],ymm7[3,4],ymm8[5,6],ymm7[7]
9233 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3]
9234 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0],ymm0[1,2,3,4],ymm7[5,6,7]
9235 ; AVX2-SLOW-NEXT: vbroadcastss 176(%rdx), %ymm7
9236 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9237 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm0[3,1,2,0,7,5,6,4]
9238 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6],ymm8[7]
9239 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9240 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
9241 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm8 = ymm1[0],ymm10[0],ymm1[1],ymm10[1],ymm1[4],ymm10[4],ymm1[5],ymm10[5]
9242 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5],ymm7[6,7]
9243 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9244 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm8 # 16-byte Folded Reload
9245 ; AVX2-SLOW-NEXT: # xmm8 = xmm5[3,3],mem[3,3]
9246 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
9247 ; AVX2-SLOW-NEXT: # xmm8 = xmm8[0,1,2],mem[3]
9248 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3],ymm7[4,5,6,7]
9249 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
9250 ; AVX2-SLOW-NEXT: # ymm8 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
9251 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm10[2],ymm1[2],ymm10[3],ymm1[3],ymm10[6],ymm1[6],ymm10[7],ymm1[7]
9252 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[3,3,3,3]
9253 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
9254 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3,4,5,6,7]
9255 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9256 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
9257 ; AVX2-SLOW-NEXT: # ymm8 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
9258 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm10 = mem[2,3,2,3,6,7,6,7]
9259 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0],ymm8[1,2],ymm10[3,4],ymm8[5,6],ymm10[7]
9260 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,2,3]
9261 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0],ymm0[1,2,3,4],ymm8[5,6,7]
9262 ; AVX2-SLOW-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm8 # 32-byte Folded Reload
9263 ; AVX2-SLOW-NEXT: # ymm8 = ymm11[0],mem[0],ymm11[1],mem[1],ymm11[4],mem[4],ymm11[5],mem[5]
9264 ; AVX2-SLOW-NEXT: vpermilps $39, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
9265 ; AVX2-SLOW-NEXT: # ymm10 = mem[3,1,2,0,7,5,6,4]
9266 ; AVX2-SLOW-NEXT: vbroadcastss 208(%rdx), %ymm11
9267 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6],ymm10[7]
9268 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5],ymm10[6,7]
9269 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
9270 ; AVX2-SLOW-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm10 # 16-byte Folded Reload
9271 ; AVX2-SLOW-NEXT: # xmm10 = xmm1[3,3],mem[3,3]
9272 ; AVX2-SLOW-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
9273 ; AVX2-SLOW-NEXT: # xmm10 = xmm10[0,1,2],mem[3]
9274 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm10[1,2,3],ymm8[4,5,6,7]
9275 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
9276 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 1440(%rax)
9277 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1312(%rax)
9278 ; AVX2-SLOW-NEXT: vmovaps %ymm7, 1216(%rax)
9279 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 1088(%rax)
9280 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 992(%rax)
9281 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 864(%rax)
9282 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 768(%rax)
9283 ; AVX2-SLOW-NEXT: vmovaps %ymm13, 640(%rax)
9284 ; AVX2-SLOW-NEXT: vmovaps %ymm15, 544(%rax)
9285 ; AVX2-SLOW-NEXT: vmovaps %ymm14, 416(%rax)
9286 ; AVX2-SLOW-NEXT: vmovaps %ymm12, 320(%rax)
9287 ; AVX2-SLOW-NEXT: vmovaps %ymm9, 192(%rax)
9288 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9289 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 96(%rax)
9290 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9291 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1504(%rax)
9292 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9293 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1472(%rax)
9294 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9295 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1280(%rax)
9296 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9297 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1248(%rax)
9298 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9299 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1056(%rax)
9300 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9301 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1024(%rax)
9302 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9303 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 832(%rax)
9304 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9305 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 800(%rax)
9306 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9307 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 608(%rax)
9308 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9309 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 576(%rax)
9310 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9311 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 384(%rax)
9312 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9313 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 352(%rax)
9314 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9315 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rax)
9316 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9317 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rax)
9318 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9319 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1760(%rax)
9320 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9321 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1728(%rax)
9322 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9323 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1664(%rax)
9324 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9325 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1536(%rax)
9326 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9327 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1408(%rax)
9328 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9329 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1376(%rax)
9330 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9331 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1344(%rax)
9332 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9333 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1184(%rax)
9334 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9335 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1152(%rax)
9336 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9337 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1120(%rax)
9338 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9339 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 960(%rax)
9340 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9341 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 928(%rax)
9342 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9343 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 896(%rax)
9344 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9345 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 736(%rax)
9346 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9347 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 704(%rax)
9348 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9349 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 672(%rax)
9350 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9351 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 512(%rax)
9352 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9353 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 480(%rax)
9354 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9355 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 448(%rax)
9356 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9357 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 288(%rax)
9358 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9359 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rax)
9360 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9361 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax)
9362 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9363 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rax)
9364 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9365 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
9366 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9367 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
9368 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9369 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1696(%rax)
9370 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9371 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1632(%rax)
9372 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9373 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1600(%rax)
9374 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9375 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1568(%rax)
9376 ; AVX2-SLOW-NEXT: addq $3000, %rsp # imm = 0xBB8
9377 ; AVX2-SLOW-NEXT: vzeroupper
9378 ; AVX2-SLOW-NEXT: retq
9380 ; AVX2-FAST-LABEL: store_i32_stride7_vf64:
9381 ; AVX2-FAST: # %bb.0:
9382 ; AVX2-FAST-NEXT: subq $3096, %rsp # imm = 0xC18
9383 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
9384 ; AVX2-FAST-NEXT: vmovaps (%rax), %xmm0
9385 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9386 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9387 ; AVX2-FAST-NEXT: vmovaps (%r8), %xmm2
9388 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9389 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %xmm3
9390 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9391 ; AVX2-FAST-NEXT: vmovaps (%r9), %xmm1
9392 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9393 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %xmm4
9394 ; AVX2-FAST-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9395 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
9396 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
9397 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
9398 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
9399 ; AVX2-FAST-NEXT: vmovaps (%rcx), %xmm2
9400 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9401 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %xmm5
9402 ; AVX2-FAST-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9403 ; AVX2-FAST-NEXT: vmovaps (%rdx), %xmm1
9404 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9405 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1],xmm2[1],zero
9406 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm8
9407 ; AVX2-FAST-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9408 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %xmm6
9409 ; AVX2-FAST-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9410 ; AVX2-FAST-NEXT: vmovaps (%rsi), %xmm2
9411 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9412 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %xmm7
9413 ; AVX2-FAST-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9414 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,1,2,2]
9415 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm8[2],xmm2[3]
9416 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
9417 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
9418 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9419 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9420 ; AVX2-FAST-NEXT: vmovaps 32(%rax), %xmm0
9421 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9422 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9423 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[1,1,1,1]
9424 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
9425 ; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm1
9426 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
9427 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm7[1,1,2,2]
9428 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm6[2],xmm1[3]
9429 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9430 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %xmm2
9431 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9432 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm5[1],zero
9433 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9434 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9435 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9436 ; AVX2-FAST-NEXT: vmovaps 64(%r8), %xmm1
9437 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9438 ; AVX2-FAST-NEXT: vmovaps 64(%r9), %xmm0
9439 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9440 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9441 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9442 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9443 ; AVX2-FAST-NEXT: vmovaps 64(%rax), %xmm1
9444 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9445 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9446 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9447 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %xmm2
9448 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9449 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %xmm1
9450 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9451 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9452 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9453 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9454 ; AVX2-FAST-NEXT: vmovaps 64(%rcx), %xmm3
9455 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9456 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %xmm2
9457 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9458 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9459 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9460 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9461 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9462 ; AVX2-FAST-NEXT: vmovaps 96(%r8), %xmm1
9463 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9464 ; AVX2-FAST-NEXT: vmovaps 96(%r9), %xmm0
9465 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9466 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9467 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9468 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9469 ; AVX2-FAST-NEXT: vmovaps 96(%rax), %xmm1
9470 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9471 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9472 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9473 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %xmm2
9474 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9475 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %xmm1
9476 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9477 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9478 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9479 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9480 ; AVX2-FAST-NEXT: vmovaps 96(%rcx), %xmm3
9481 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9482 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %xmm2
9483 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9484 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9485 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9486 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9487 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9488 ; AVX2-FAST-NEXT: vmovaps 128(%r8), %xmm1
9489 ; AVX2-FAST-NEXT: vmovaps %xmm1, (%rsp) # 16-byte Spill
9490 ; AVX2-FAST-NEXT: vmovaps 128(%r9), %xmm0
9491 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9492 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9493 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9494 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9495 ; AVX2-FAST-NEXT: vmovaps 128(%rax), %xmm1
9496 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9497 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9498 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9499 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %xmm2
9500 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9501 ; AVX2-FAST-NEXT: vmovaps 128(%rsi), %xmm1
9502 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9503 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9504 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9505 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9506 ; AVX2-FAST-NEXT: vmovaps 128(%rcx), %xmm3
9507 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9508 ; AVX2-FAST-NEXT: vmovaps 128(%rdx), %xmm2
9509 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9510 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9511 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9512 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9513 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9514 ; AVX2-FAST-NEXT: vmovaps 160(%r8), %xmm1
9515 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9516 ; AVX2-FAST-NEXT: vmovaps 160(%r9), %xmm0
9517 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9518 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9519 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9520 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9521 ; AVX2-FAST-NEXT: vmovaps 160(%rax), %xmm1
9522 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9523 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9524 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9525 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %xmm2
9526 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9527 ; AVX2-FAST-NEXT: vmovaps 160(%rsi), %xmm1
9528 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9529 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9530 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9531 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9532 ; AVX2-FAST-NEXT: vmovaps 160(%rcx), %xmm3
9533 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9534 ; AVX2-FAST-NEXT: vmovaps 160(%rdx), %xmm2
9535 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9536 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9537 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9538 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9539 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9540 ; AVX2-FAST-NEXT: vmovaps 192(%r9), %xmm0
9541 ; AVX2-FAST-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9542 ; AVX2-FAST-NEXT: vmovaps 192(%r8), %xmm1
9543 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9544 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
9545 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
9546 ; AVX2-FAST-NEXT: vbroadcastsd %xmm0, %ymm0
9547 ; AVX2-FAST-NEXT: vmovaps 192(%rax), %xmm1
9548 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9549 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9550 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9551 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %xmm2
9552 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9553 ; AVX2-FAST-NEXT: vmovaps 192(%rsi), %xmm1
9554 ; AVX2-FAST-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9555 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
9556 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
9557 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
9558 ; AVX2-FAST-NEXT: vmovaps 192(%rcx), %xmm3
9559 ; AVX2-FAST-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9560 ; AVX2-FAST-NEXT: vmovaps 192(%rdx), %xmm2
9561 ; AVX2-FAST-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9562 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
9563 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
9564 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
9565 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9566 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
9567 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9568 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm1
9569 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9570 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
9571 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9572 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm11
9573 ; AVX2-FAST-NEXT: vmovaps (%rcx), %ymm1
9574 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9575 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm11[2],ymm1[2],ymm11[3],ymm1[3],ymm11[6],ymm1[6],ymm11[7],ymm1[7]
9576 ; AVX2-FAST-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9577 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9578 ; AVX2-FAST-NEXT: vmovaps (%r8), %ymm2
9579 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9580 ; AVX2-FAST-NEXT: vmovaps (%r9), %ymm12
9581 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,1,2,2,5,5,6,6]
9582 ; AVX2-FAST-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9583 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9584 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9585 ; AVX2-FAST-NEXT: vmovaps 16(%rax), %xmm2
9586 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9587 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9588 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9589 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm1
9590 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9591 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %ymm0
9592 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9593 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
9594 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9595 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm1
9596 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9597 ; AVX2-FAST-NEXT: vmovaps 32(%rcx), %ymm2
9598 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9599 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9600 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9601 ; AVX2-FAST-NEXT: vmovaps 32(%r8), %ymm2
9602 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9603 ; AVX2-FAST-NEXT: vmovaps 32(%r9), %ymm1
9604 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9605 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9606 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9607 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9608 ; AVX2-FAST-NEXT: vmovaps 48(%rax), %xmm2
9609 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9610 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9611 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9612 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm0
9613 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9614 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %ymm1
9615 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9616 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
9617 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9618 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %ymm1
9619 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9620 ; AVX2-FAST-NEXT: vmovaps 64(%rcx), %ymm2
9621 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9622 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9623 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9624 ; AVX2-FAST-NEXT: vmovaps 64(%r8), %ymm2
9625 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9626 ; AVX2-FAST-NEXT: vmovaps 64(%r9), %ymm1
9627 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9628 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9629 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9630 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9631 ; AVX2-FAST-NEXT: vmovaps 80(%rax), %xmm2
9632 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9633 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9634 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9635 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm1
9636 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9637 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %ymm0
9638 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9639 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
9640 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9641 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %ymm1
9642 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9643 ; AVX2-FAST-NEXT: vmovaps 96(%rcx), %ymm2
9644 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9645 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9646 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9647 ; AVX2-FAST-NEXT: vmovaps 96(%r8), %ymm2
9648 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9649 ; AVX2-FAST-NEXT: vmovaps 96(%r9), %ymm1
9650 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9651 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9652 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9653 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9654 ; AVX2-FAST-NEXT: vmovaps 112(%rax), %xmm2
9655 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9656 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9657 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9658 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm1
9659 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9660 ; AVX2-FAST-NEXT: vmovaps 128(%rsi), %ymm0
9661 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9662 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
9663 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9664 ; AVX2-FAST-NEXT: vmovaps 128(%rdx), %ymm1
9665 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9666 ; AVX2-FAST-NEXT: vmovaps 128(%rcx), %ymm2
9667 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9668 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9669 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9670 ; AVX2-FAST-NEXT: vmovaps 128(%r8), %ymm2
9671 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9672 ; AVX2-FAST-NEXT: vmovaps 128(%r9), %ymm1
9673 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9674 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9675 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9676 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9677 ; AVX2-FAST-NEXT: vmovaps 144(%rax), %xmm2
9678 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9679 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9680 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9681 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm1
9682 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9683 ; AVX2-FAST-NEXT: vmovaps 160(%rsi), %ymm0
9684 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9685 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
9686 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9687 ; AVX2-FAST-NEXT: vmovaps 160(%rdx), %ymm1
9688 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9689 ; AVX2-FAST-NEXT: vmovaps 160(%rcx), %ymm2
9690 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9691 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9692 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9693 ; AVX2-FAST-NEXT: vmovaps 160(%r8), %ymm2
9694 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9695 ; AVX2-FAST-NEXT: vmovaps 160(%r9), %ymm1
9696 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9697 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9698 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9699 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9700 ; AVX2-FAST-NEXT: vmovaps 176(%rax), %xmm2
9701 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9702 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9703 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9704 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %ymm1
9705 ; AVX2-FAST-NEXT: vmovaps 192(%rsi), %ymm9
9706 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm9[2],ymm1[3],ymm9[3],ymm1[6],ymm9[6],ymm1[7],ymm9[7]
9707 ; AVX2-FAST-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9708 ; AVX2-FAST-NEXT: vmovaps %ymm1, %ymm13
9709 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9710 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9711 ; AVX2-FAST-NEXT: vmovaps 192(%rdx), %ymm7
9712 ; AVX2-FAST-NEXT: vmovaps 192(%rcx), %ymm8
9713 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7]
9714 ; AVX2-FAST-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9715 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9716 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9717 ; AVX2-FAST-NEXT: vmovaps 192(%r8), %ymm2
9718 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9719 ; AVX2-FAST-NEXT: vmovaps 192(%r9), %ymm1
9720 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9721 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
9722 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
9723 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9724 ; AVX2-FAST-NEXT: vmovaps 208(%rax), %xmm2
9725 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
9726 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
9727 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9728 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %xmm0
9729 ; AVX2-FAST-NEXT: vmovaps 224(%rsi), %xmm1
9730 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm1[1,1,2,2]
9731 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2],xmm2[3]
9732 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
9733 ; AVX2-FAST-NEXT: vmovaps 224(%rcx), %xmm3
9734 ; AVX2-FAST-NEXT: vmovaps 224(%rdx), %xmm6
9735 ; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm6[1],xmm3[1],zero
9736 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm5[1,2],ymm2[3,4,5,6,7]
9737 ; AVX2-FAST-NEXT: vbroadcastss 228(%r8), %ymm4
9738 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5,6,7]
9739 ; AVX2-FAST-NEXT: vmovaps 224(%r9), %xmm4
9740 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm15 = xmm4[1,1,1,1]
9741 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
9742 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5],ymm2[6,7]
9743 ; AVX2-FAST-NEXT: vinsertf128 $1, 224(%rax), %ymm5, %ymm5
9744 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3,4],ymm5[5],ymm2[6,7]
9745 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9746 ; AVX2-FAST-NEXT: vbroadcastss %xmm3, %xmm2
9747 ; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm5
9748 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
9749 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm15 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
9750 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm10 = [0,1,2,2,0,1,2,2]
9751 ; AVX2-FAST-NEXT: # ymm10 = mem[0,1,0,1]
9752 ; AVX2-FAST-NEXT: vpermps %ymm15, %ymm10, %ymm15
9753 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1],ymm2[2,3],ymm15[4,5,6,7]
9754 ; AVX2-FAST-NEXT: vbroadcastsd 224(%r8), %ymm15
9755 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5],ymm2[6,7]
9756 ; AVX2-FAST-NEXT: vbroadcastss %xmm4, %ymm15
9757 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm15[5],ymm2[6,7]
9758 ; AVX2-FAST-NEXT: vbroadcastss 224(%rax), %ymm15
9759 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm15[6],ymm2[7]
9760 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9761 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
9762 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
9763 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9764 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
9765 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9766 ; AVX2-FAST-NEXT: vmovaps 224(%r8), %ymm6
9767 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5,6,7]
9768 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[2,2,2,2]
9769 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
9770 ; AVX2-FAST-NEXT: vbroadcastss 232(%rax), %ymm1
9771 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7]
9772 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9773 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %ymm2
9774 ; AVX2-FAST-NEXT: vmovaps 224(%rsi), %ymm1
9775 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,1,1,1,5,5,5,5]
9776 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
9777 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm15 = ymm0[2,2,2,2]
9778 ; AVX2-FAST-NEXT: vmovaps 224(%rdx), %ymm3
9779 ; AVX2-FAST-NEXT: vmovaps 224(%rcx), %ymm0
9780 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm14 = ymm3[1,1],ymm0[1,1],ymm3[5,5],ymm0[5,5]
9781 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4],ymm14[5,6],ymm15[7]
9782 ; AVX2-FAST-NEXT: vbroadcastsd 240(%r8), %ymm15
9783 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1,2,3,4,5,6],ymm15[7]
9784 ; AVX2-FAST-NEXT: vbroadcastss 240(%r9), %xmm15
9785 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6,7]
9786 ; AVX2-FAST-NEXT: vbroadcastss 240(%rax), %ymm15
9787 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4,5,6,7]
9788 ; AVX2-FAST-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9789 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm14 = ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[6],ymm7[6],ymm8[7],ymm7[7]
9790 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[3,3,3,3]
9791 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm15 = ymm9[2],ymm13[2],ymm9[3],ymm13[3],ymm9[6],ymm13[6],ymm9[7],ymm13[7]
9792 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[3,3,3,3]
9793 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3,4,5,6,7]
9794 ; AVX2-FAST-NEXT: vbroadcastss 220(%r8), %ymm15
9795 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5],ymm14[6,7]
9796 ; AVX2-FAST-NEXT: vbroadcastss 220(%r9), %ymm15
9797 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
9798 ; AVX2-FAST-NEXT: vbroadcastsd 216(%rax), %ymm15
9799 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm15[0],ymm14[1,2,3,4,5,6],ymm15[7]
9800 ; AVX2-FAST-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9801 ; AVX2-FAST-NEXT: vbroadcastss 240(%rdx), %ymm14
9802 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm15 = ymm0[3,1,2,0,7,5,6,4]
9803 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6],ymm15[7]
9804 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm15 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
9805 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
9806 ; AVX2-FAST-NEXT: vbroadcastss 236(%r8), %ymm15
9807 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6,7]
9808 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,2,3,3]
9809 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm14[0,1],ymm4[2,3],ymm14[4,5,6,7]
9810 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm14 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
9811 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[2,2,2,2]
9812 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm15 = ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[6],ymm0[6],ymm3[7],ymm0[7]
9813 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7]
9814 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm14[0,1,2,3,4,5],ymm6[6,7]
9815 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm14 = [5,6,5,6,5,6,5,6]
9816 ; AVX2-FAST-NEXT: vpermps 224(%r9), %ymm14, %ymm14
9817 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm14[0],ymm6[1,2,3,4,5,6],ymm14[7]
9818 ; AVX2-FAST-NEXT: vmovaps 224(%rax), %ymm14
9819 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm14[3],ymm4[4,5,6,7]
9820 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9821 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm14[2,3],ymm15[2,3]
9822 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3,4],ymm4[5],ymm6[6,7]
9823 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9824 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7]
9825 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
9826 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
9827 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
9828 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9829 ; AVX2-FAST-NEXT: vbroadcastss 252(%r8), %ymm1
9830 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
9831 ; AVX2-FAST-NEXT: vbroadcastss 252(%r9), %ymm1
9832 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
9833 ; AVX2-FAST-NEXT: vbroadcastsd 248(%rax), %ymm1
9834 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
9835 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9836 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9837 ; AVX2-FAST-NEXT: vbroadcastss %xmm5, %xmm0
9838 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
9839 ; AVX2-FAST-NEXT: vbroadcastss %xmm4, %xmm1
9840 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9841 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9842 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9843 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
9844 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
9845 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
9846 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9847 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9848 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
9849 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9850 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
9851 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
9852 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
9853 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9854 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm8[3,3],xmm7[3,3]
9855 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
9856 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9857 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
9858 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9859 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm6[2,2,2,2]
9860 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
9861 ; AVX2-FAST-NEXT: vbroadcastsd 8(%rax), %ymm2
9862 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
9863 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
9864 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9865 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9866 ; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9867 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9868 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
9869 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9870 ; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm1 # 32-byte Folded Reload
9871 ; AVX2-FAST-NEXT: # ymm1 = ymm11[1,1],mem[1,1],ymm11[5,5],mem[5,5]
9872 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9873 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm12[0,0,0,0,4,4,4,4]
9874 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9875 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9876 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9877 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9878 ; AVX2-FAST-NEXT: vbroadcastsd 16(%rax), %ymm2
9879 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9880 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9881 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9882 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
9883 ; AVX2-FAST-NEXT: vbroadcastss %xmm4, %xmm0
9884 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9885 ; AVX2-FAST-NEXT: vbroadcastss %xmm8, %xmm1
9886 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9887 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9888 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9889 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
9890 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
9891 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
9892 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9893 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9894 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
9895 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9896 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
9897 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
9898 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
9899 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9900 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm5[3,3],xmm6[3,3]
9901 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm8[2],xmm4[2],xmm8[3],xmm4[3]
9902 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9903 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
9904 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9905 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm7[2,2,2,2]
9906 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
9907 ; AVX2-FAST-NEXT: vbroadcastsd 40(%rax), %ymm2
9908 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
9909 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
9910 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9911 ; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
9912 ; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
9913 ; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9914 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
9915 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
9916 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9917 ; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9918 ; AVX2-FAST-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
9919 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
9920 ; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
9921 ; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
9922 ; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
9923 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
9924 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
9925 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
9926 ; AVX2-FAST-NEXT: vbroadcastsd 48(%rax), %ymm2
9927 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
9928 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
9929 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9930 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
9931 ; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm0
9932 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
9933 ; AVX2-FAST-NEXT: vbroadcastss %xmm7, %xmm1
9934 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
9935 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
9936 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
9937 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
9938 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
9939 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
9940 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
9941 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
9942 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
9943 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
9944 ; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
9945 ; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
9946 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
9947 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9948 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm5[3,3],xmm4[3,3]
9949 ; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
9950 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
9951 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
9952 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
9953 ; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm3[2,2,2,2]
9954 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3]
9955 ; AVX2-FAST-NEXT: vbroadcastsd 72(%rax), %ymm2
9956 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
9957 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
9958 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9959 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
9960 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,1,1,1,5,5,5,5]
; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-NEXT: vbroadcastsd 80(%rax), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX2-FAST-NEXT: vbroadcastss %xmm5, %xmm0
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm1
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm4[3,3],xmm3[3,3]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm7[2,2,2,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3]
; AVX2-FAST-NEXT: vbroadcastsd 104(%rax), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm12[1],ymm0[2,3,4],ymm12[5],ymm0[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm11[1,1],ymm1[5,5],ymm11[5,5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-NEXT: vbroadcastsd 112(%rax), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX2-FAST-NEXT: vbroadcastss %xmm5, %xmm0
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm1
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm4[3,3],xmm3[3,3]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm7[2,2,2,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3]
; AVX2-FAST-NEXT: vbroadcastsd 136(%rax), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm9[1],ymm0[2,3,4],ymm9[5],ymm0[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-NEXT: vbroadcastsd 144(%rax), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX2-FAST-NEXT: vbroadcastss %xmm5, %xmm0
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; AVX2-FAST-NEXT: vbroadcastss %xmm6, %xmm1
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6],ymm0[7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm4[3,3],xmm3[3,3]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm7[2,2,2,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1,2],xmm1[3]
; AVX2-FAST-NEXT: vbroadcastsd 168(%rax), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
; AVX2-FAST-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm14[1,1],ymm1[5,5],ymm14[5,5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-NEXT: vbroadcastsd 176(%rax), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX2-FAST-NEXT: vbroadcastss %xmm5, %xmm0
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX2-FAST-NEXT: vbroadcastss %xmm4, %xmm1
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 16-byte Folded Reload
; AVX2-FAST-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm1[4,5,6],ymm8[7]
; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm7[3,3]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm6[2,2,2,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
; AVX2-FAST-NEXT: vbroadcastsd 200(%rax), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm13[1,1],ymm0[5,5],ymm13[5,5]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm7[1,1,1,1,5,5,5,5]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
; AVX2-FAST-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
; AVX2-FAST-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-NEXT: vbroadcastsd 208(%rax), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vbroadcastss 16(%rdx), %ymm0
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm5[3,1,2,0,7,5,6,4]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm0 = ymm5[2],mem[2],ymm5[3],mem[3],ymm5[6],mem[6],ymm5[7],mem[7]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm5 = mem[2,3,2,3,6,7,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0],ymm1[1,2],ymm5[3,4],ymm1[5,6],ymm5[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vbroadcastss 48(%rdx), %ymm0
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm5[3,1,2,0,7,5,6,4]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm0 = ymm5[2],mem[2],ymm5[3],mem[3],ymm5[6],mem[6],ymm5[7],mem[7]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm5 = mem[2,3,2,3,6,7,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0],ymm1[1,2],ymm5[3,4],ymm1[5,6],ymm5[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT: vbroadcastss 80(%rdx), %ymm0
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm5[3,1,2,0,7,5,6,4]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm15, %ymm3
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm15[0],ymm2[1],ymm15[1],ymm2[4],ymm15[4],ymm2[5],ymm15[5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm0 = ymm5[2],mem[2],ymm5[3],mem[3],ymm5[6],mem[6],ymm5[7],mem[7]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm5 = mem[2,3,2,3,6,7,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0],ymm1[1,2],ymm5[3,4],ymm1[5,6],ymm5[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
; AVX2-FAST-NEXT: vbroadcastss 112(%rdx), %ymm0
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm11[3,1,2,0,7,5,6,4]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6],ymm5[7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm5 = ymm12[0],ymm2[0],ymm12[1],ymm2[1],ymm12[4],ymm2[4],ymm12[5],ymm2[5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5],ymm0[6,7]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm5 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm5 = xmm4[3,3],mem[3,3]
; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm5 = xmm5[0,1,2],mem[3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm5[1,2,3],ymm0[4,5,6,7]
; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm5 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm5 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm11 = ymm2[2],ymm12[2],ymm2[3],ymm12[3],ymm2[6],ymm12[6],ymm2[7],ymm12[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[3,3,3,3]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[3,3,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm11[0,1,2],ymm5[3,4,5,6,7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm11 = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm12 = mem[2,3,2,3,6,7,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1,2],ymm12[3,4],ymm11[5,6],ymm12[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm5[1,2,3,4],ymm11[5,6,7]
; AVX2-FAST-NEXT: vbroadcastss 144(%rdx), %ymm5
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm12 = ymm2[3,1,2,0,7,5,6,4]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm12[0,1,2,3,4,5],ymm5[6],ymm12[7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm12 = ymm9[0],ymm1[0],ymm9[1],ymm1[1],ymm9[4],ymm1[4],ymm9[5],ymm1[5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5],ymm5[6,7]
; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm3 # 16-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm12 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm12 = xmm3[3,3],mem[3,3]
; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm12 = ymm5[0],ymm12[1,2,3],ymm5[4,5,6,7]
; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm5 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm9[2],ymm1[3],ymm9[3],ymm1[6],ymm9[6],ymm1[7],ymm9[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[3,3,3,3]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6,7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm5 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm9 = mem[2,3,2,3,6,7,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm9[0],ymm5[1,2],ymm9[3,4],ymm5[5,6],ymm9[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,2,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0],ymm0[1,2,3,4],ymm5[5,6,7]
; AVX2-FAST-NEXT: vbroadcastss 176(%rdx), %ymm5
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm14[3,1,2,0,7,5,6,4]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3,4,5],ymm5[6],ymm9[7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm9 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5],ymm5[6,7]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm9 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm9 = xmm8[3,3],mem[3,3]
; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm9 = xmm9[0,1,2],mem[3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm9[1,2,3],ymm5[4,5,6,7]
; AVX2-FAST-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm9 = ymm14[2],mem[2],ymm14[3],mem[3],ymm14[6],mem[6],ymm14[7],mem[7]
; AVX2-FAST-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[6],ymm2[6],ymm0[7],ymm2[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[3,3,3,3]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3,4,5,6,7]
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-FAST-NEXT: # ymm9 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,2,3,6,7,6,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm9[1,2],ymm8[3,4],ymm9[5,6],ymm8[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,2,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0],ymm0[1,2,3,4],ymm8[5,6,7]
; AVX2-FAST-NEXT: vunpcklps {{.*#+}} ymm8 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm13[3,1,2,0,7,5,6,4]
; AVX2-FAST-NEXT: vbroadcastss 208(%rdx), %ymm7
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3,4,5],ymm7[6],ymm9[7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5],ymm7[6,7]
; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-FAST-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm8 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm8 = xmm1[3,3],mem[3,3]
; AVX2-FAST-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
; AVX2-FAST-NEXT: # xmm8 = xmm8[0,1,2],mem[3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3],ymm7[4,5,6,7]
; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-NEXT: vmovaps %ymm7, 1440(%rax)
; AVX2-FAST-NEXT: vmovaps %ymm0, 1312(%rax)
; AVX2-FAST-NEXT: vmovaps %ymm5, 1216(%rax)
; AVX2-FAST-NEXT: vmovaps %ymm3, 1088(%rax)
; AVX2-FAST-NEXT: vmovaps %ymm12, 992(%rax)
; AVX2-FAST-NEXT: vmovaps %ymm11, 864(%rax)
; AVX2-FAST-NEXT: vmovaps %ymm4, 768(%rax)
; AVX2-FAST-NEXT: vmovaps %ymm10, 640(%rax)
; AVX2-FAST-NEXT: vmovaps %ymm15, 544(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 416(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 320(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 192(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 96(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1504(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1472(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1408(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1344(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1280(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1248(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1184(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1120(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1056(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1024(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 960(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 896(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 832(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 800(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 736(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 672(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 608(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 576(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 512(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 448(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 384(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 352(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 288(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1760(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1728(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1664(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1536(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1376(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1152(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 928(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 704(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 480(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 256(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1696(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1632(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1568(%rax)
; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovaps %ymm0, 1600(%rax)
; AVX2-FAST-NEXT: addq $3096, %rsp # imm = 0xC18
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: store_i32_stride7_vf64:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: subq $3000, %rsp # imm = 0xBB8
; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-PERLANE-NEXT: vmovaps (%rax), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %xmm4
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %xmm7
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %xmm9
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %xmm3
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %xmm8
; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm8[1],xmm9[1],zero
; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm6
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %xmm10
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %xmm5
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %xmm11
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm5[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rax), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm7[1,1,1,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm1
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm11[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm10[2],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %xmm7
; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm7[1],xmm3[1],zero
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r8), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r9), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rax), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rcx), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %xmm11
; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm11[1],xmm2[1],zero
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r8), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r9), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rax), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rcx), %xmm3
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%r8), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, (%rsp) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%r9), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rax), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rsi), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rcx), %xmm3
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdx), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%r8), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%r9), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rax), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rsi), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rcx), %xmm3
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdx), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%r9), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%r8), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rax), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rsi), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rcx), %xmm3
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdx), %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1],xmm3[1],zero
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-PERLANE-NEXT: vmovaps 16(%rax), %xmm2
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rcx), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-PERLANE-NEXT: vmovaps 48(%rax), %xmm2
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rcx), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r8), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%r9), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-PERLANE-NEXT: vmovaps 80(%rax), %xmm2
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rcx), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r8), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%r9), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-PERLANE-NEXT: vmovaps 112(%rax), %xmm2
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rsi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdx), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rcx), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%r8), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%r9), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-PERLANE-NEXT: vmovaps 144(%rax), %xmm2
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rsi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdx), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rcx), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%r8), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%r9), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-PERLANE-NEXT: vmovaps 176(%rax), %xmm2
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rsi), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdx), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rcx), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%r8), %ymm2
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%r9), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
; AVX2-FAST-PERLANE-NEXT: vmovaps 208(%rax), %xmm2
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rcx), %xmm0
; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm0, %xmm2
; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdx), %xmm1
; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm1, %xmm3
; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rsi), %xmm4
; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %xmm2
; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm15 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm15[0,1],ymm3[2,3],ymm15[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 224(%r8), %ymm15
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm15 = ymm3[0,1,2,3],ymm15[4,5],ymm3[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%r9), %xmm3
; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm3, %ymm14
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4],ymm14[5],ymm15[6,7]
; AVX2-FAST-PERLANE-NEXT: vbroadcastss 224(%rax), %ymm15
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm14[0,1,2,3,4,5],ymm15[6],ymm14[7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm14 = xmm4[1,1,2,2]
10812 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm14 = xmm14[0,1],xmm2[2],xmm14[3]
10813 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,2,1]
10814 ; AVX2-FAST-PERLANE-NEXT: vinsertps {{.*#+}} xmm15 = zero,xmm1[1],xmm0[1],zero
10815 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1,2],ymm14[3,4,5,6,7]
10816 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 228(%r8), %ymm13
10817 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3],ymm14[4,5,6,7]
10818 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm14 = xmm3[1,1,1,1]
10819 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
10820 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5],ymm13[6,7]
10821 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, 224(%rax), %ymm15, %ymm14
10822 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
10823 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10824 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3],xmm4[3,3]
10825 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
10826 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1
10827 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,2,2]
10828 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
10829 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
10830 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%r8), %ymm1
10831 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10832 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
10833 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm3[2,2,2,2]
10834 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
10835 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 232(%rax), %ymm1
10836 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7]
10837 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10838 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %ymm12
10839 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rsi), %ymm10
10840 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm10[1,1,1,1,5,5,5,5]
10841 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm12[1],ymm0[2,3,4],ymm12[5],ymm0[6,7]
10842 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm0[2,2,2,2]
10843 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdx), %ymm0
10844 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10845 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rcx), %ymm2
10846 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm14 = ymm0[1,1],ymm2[1,1],ymm0[5,5],ymm2[5,5]
10847 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4],ymm14[5,6],ymm13[7]
10848 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 240(%r8), %ymm14
10849 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1,2,3,4,5,6],ymm14[7]
10850 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 240(%r9), %xmm14
10851 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4,5,6,7]
10852 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 240(%rax), %ymm14
10853 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm14[2],ymm13[3,4,5,6,7]
10854 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10855 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm13
10856 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm8, %xmm14
10857 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
10858 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm14 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
10859 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm14 = xmm14[0,1,2,2]
10860 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,2,1]
10861 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm14[0,1],ymm13[2,3],ymm14[4,5,6,7]
10862 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
10863 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
10864 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm14 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
10865 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
10866 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 16-byte Folded Reload
10867 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm14[0],ymm15[0],ymm14[2],ymm15[2]
10868 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6],ymm1[7]
10869 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10870 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm6[3,3],xmm5[3,3]
10871 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm6 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
10872 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
10873 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
10874 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
10875 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
10876 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm4[2,2,2,2]
10877 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3]
10878 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 8(%rax), %ymm8
10879 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
10880 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
10881 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10882 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
10883 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm1, %xmm5
10884 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm7, %xmm6
10885 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
10886 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
10887 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
10888 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm4[0],xmm14[0],xmm4[1],xmm14[1]
10889 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
10890 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
10891 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
10892 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10893 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
10894 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm9[0],xmm13[0],xmm9[1],xmm13[1]
10895 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10896 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 16-byte Folded Reload
10897 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
10898 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
10899 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10900 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm14[3,3]
10901 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm6 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
10902 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
10903 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
10904 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
10905 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
10906 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm13[2,2,2,2]
10907 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1,2],xmm6[3]
10908 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 40(%rax), %ymm7
10909 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
10910 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
10911 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10912 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
10913 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm5
10914 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm11, %xmm6
10915 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
10916 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
10917 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10918 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
10919 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
10920 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
10921 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
10922 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
10923 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
10924 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
10925 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10926 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
10927 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
10928 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
10929 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10930 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm9[3,3],xmm8[3,3]
10931 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm6 = xmm11[2],xmm13[2],xmm11[3],xmm13[3]
10932 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
10933 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
10934 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
10935 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
10936 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm0[2,2,2,2]
10937 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm1[0,1,2],xmm6[3]
10938 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 72(%rax), %ymm7
10939 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
10940 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
10941 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10942 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10943 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm5
10944 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
10945 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm6
10946 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
10947 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
10948 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
10949 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
10950 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
10951 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
10952 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
10953 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
10954 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
10955 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm11[0],xmm8[1],xmm11[1]
10956 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10957 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
10958 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
10959 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
10960 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10961 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm1[3,3]
10962 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm6 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
10963 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
10964 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
10965 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
10966 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
10967 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm11[2,2,2,2]
10968 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3]
10969 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 104(%rax), %ymm7
10970 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
10971 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
10972 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10973 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
10974 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm5
10975 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
10976 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm6
10977 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
10978 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
10979 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
10980 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
10981 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
10982 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
10983 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
10984 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
10985 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
10986 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
10987 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
10988 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
10989 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
10990 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
10991 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10992 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm8[3,3],xmm4[3,3]
10993 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm6 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
10994 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
10995 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
10996 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
10997 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
10998 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm11[2,2,2,2]
10999 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3]
11000 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 136(%rax), %ymm7
11001 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
11002 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
11003 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11004 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
11005 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm5
11006 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
11007 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm13, %xmm6
11008 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
11009 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
11010 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
11011 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
11012 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
11013 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
11014 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
11015 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
11016 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
11017 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
11018 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
11019 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
11020 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
11021 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
11022 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11023 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm8[3,3],xmm4[3,3]
11024 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm6 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
11025 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
11026 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
11027 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
11028 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
11029 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm11[2,2,2,2]
11030 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3]
11031 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 168(%rax), %ymm7
11032 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
11033 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
11034 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11035 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
11036 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm9, %xmm5
11037 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
11038 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm11, %xmm6
11039 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
11040 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
11041 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
11042 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
11043 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
11044 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
11045 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
11046 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
11047 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11048 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
11049 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
11050 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
11051 ; AVX2-FAST-PERLANE-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
11052 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6],ymm5[7]
11053 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11054 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm8[3,3],xmm4[3,3]
11055 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} xmm6 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
11056 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
11057 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,2,2]
11058 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,2,1]
11059 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6],ymm6[7]
11060 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm6 = xmm0[2,2,2,2]
11061 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm6 = xmm1[0,1,2],xmm6[3]
11062 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 200(%rax), %ymm7
11063 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
11064 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3,4],ymm5[5,6,7]
11065 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11066 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11067 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
11068 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
11069 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[3,3,3,3]
11070 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
11071 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
11072 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm6 = ymm13[2],ymm11[2],ymm13[3],ymm11[3],ymm13[6],ymm11[6],ymm13[7],ymm11[7]
11073 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[3,3,3,3]
11074 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7]
11075 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 220(%r8), %ymm6
11076 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5],ymm5[6,7]
11077 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 220(%r9), %ymm6
11078 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
11079 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 216(%rax), %ymm6
11080 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0],ymm5[1,2,3,4,5,6],ymm6[7]
11081 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11082 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 240(%rdx), %ymm5
11083 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm2[3,1,2,0,7,5,6,4]
11084 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6],ymm6[7]
11085 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm6 = ymm12[0],ymm10[0],ymm12[1],ymm10[1],ymm12[4],ymm10[4],ymm12[5],ymm10[5]
11086 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
11087 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 236(%r8), %ymm6
11088 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4,5,6,7]
11089 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,2,3,3]
11090 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3],ymm5[4,5,6,7]
11091 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm5 = ymm12[2],ymm10[2],ymm12[3],ymm10[3],ymm12[6],ymm10[6],ymm12[7],ymm10[7]
11092 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,2]
11093 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11094 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm6 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
11095 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
11096 ; AVX2-FAST-PERLANE-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
11097 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
11098 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm7 = mem[1,2,2,3,5,6,6,7]
11099 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,2,2,2]
11100 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0],ymm5[1,2,3,4,5,6],ymm7[7]
11101 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rax), %ymm7
11102 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm7[3],ymm3[4,5,6,7]
11103 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11104 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm7[2,3],ymm6[2,3]
11105 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0],ymm3[1],ymm5[2,3,4],ymm3[5],ymm5[6,7]
11106 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11107 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
11108 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[6],ymm12[6],ymm10[7],ymm12[7]
11109 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11110 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11111 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11112 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 252(%r8), %ymm1
11113 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
11114 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 252(%r9), %ymm1
11115 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
11116 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 248(%rax), %ymm1
11117 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
11118 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11119 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
11120 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm6[1,1,1,1,5,5,5,5]
11121 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11122 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4],ymm5[5],ymm0[6,7]
11123 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11124 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
11125 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11126 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,1],ymm3[1,1],ymm8[5,5],ymm3[5,5]
11127 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11128 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
11129 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm12[0,0,0,0,4,4,4,4]
11130 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
11131 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm9[0,1,0,1,4,5,4,5]
11132 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11133 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11134 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 16(%rax), %ymm2
11135 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11136 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11137 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11138 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
11139 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm14[1,1,1,1,5,5,5,5]
11140 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
11141 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm10[1],ymm0[2,3,4],ymm10[5],ymm0[6,7]
11142 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11143 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
11144 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11145 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm7[1,1],ymm1[5,5],ymm7[5,5]
11146 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11147 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11148 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11149 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11150 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11151 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11152 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11153 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 48(%rax), %ymm2
11154 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11155 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11156 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11157 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
11158 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
11159 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11160 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
11161 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11162 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
11163 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11164 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm15[1,1],ymm1[5,5],ymm15[5,5]
11165 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11166 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11167 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11168 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11169 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11170 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11171 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11172 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 80(%rax), %ymm2
11173 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11174 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11175 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11176 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
11177 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
11178 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11179 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
11180 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11181 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11182 ; AVX2-FAST-PERLANE-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11183 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
11184 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11185 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11186 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11187 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11188 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11189 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11190 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11191 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 112(%rax), %ymm2
11192 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11193 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11194 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11195 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
11196 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
11197 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11198 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
11199 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11200 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11201 ; AVX2-FAST-PERLANE-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11202 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
11203 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11204 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11205 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11206 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11207 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11208 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11209 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11210 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 144(%rax), %ymm2
11211 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11212 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11213 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11214 ; AVX2-FAST-PERLANE-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
11215 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[1,1,1,1,5,5,5,5]
11216 ; AVX2-FAST-PERLANE-NEXT: vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11217 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[1],ymm0[2,3,4],mem[5],ymm0[6,7]
11218 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
11219 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11220 ; AVX2-FAST-PERLANE-NEXT: vshufps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11221 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[1,1],mem[1,1],ymm1[5,5],mem[5,5]
11222 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6],ymm0[7]
11223 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11224 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11225 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11226 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11227 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11228 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11229 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 176(%rax), %ymm2
11230 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11231 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11232 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11233 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11234 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm4[1,1],ymm0[5,5],ymm4[5,5]
11235 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm13[1,1,1,1,5,5,5,5]
11236 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4],ymm11[5],ymm1[6,7]
11237 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,2]
11238 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
11239 ; AVX2-FAST-PERLANE-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
11240 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,0,0,0,4,4,4,4]
11241 ; AVX2-FAST-PERLANE-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
11242 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1,4,5,4,5]
11243 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
11244 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11245 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 208(%rax), %ymm2
11246 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
11247 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7]
11248 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11249 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%rdx), %ymm0
11250 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm3[3,1,2,0,7,5,6,4]
11251 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
11252 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm1 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[4],ymm6[4],ymm5[5],ymm6[5]
11253 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
11254 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11255 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11256 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
11257 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11258 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
11259 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
11260 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11261 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm3[2],ymm8[2],ymm3[3],ymm8[3],ymm3[6],ymm8[6],ymm3[7],ymm8[7]
11262 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[6],ymm5[6],ymm6[7],ymm5[7]
11263 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11264 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11265 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11266 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm9[3,3],ymm12[3,3],ymm9[7,7],ymm12[7,7]
11267 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
11268 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
11269 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11270 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
11271 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 48(%rdx), %ymm0
11272 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm7[3,1,2,0,7,5,6,4]
11273 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
11274 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm1 = ymm10[0],ymm14[0],ymm10[1],ymm14[1],ymm10[4],ymm14[4],ymm10[5],ymm14[5]
11275 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
11276 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11277 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11278 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
11279 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11280 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
11281 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
11282 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm0 # 32-byte Folded Reload
11283 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm7[2],mem[2],ymm7[3],mem[3],ymm7[6],mem[6],ymm7[7],mem[7]
11284 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm14[2],ymm10[2],ymm14[3],ymm10[3],ymm14[6],ymm10[6],ymm14[7],ymm10[7]
11285 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11286 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11287 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11288 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11289 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11290 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11291 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
11292 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
11293 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11294 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
11295 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 80(%rdx), %ymm0
11296 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm15, %ymm13
11297 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm15[3,1,2,0,7,5,6,4]
11298 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
11299 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
11300 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11301 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
11302 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
11303 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11304 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11305 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
11306 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11307 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
11308 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
11309 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
11310 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm13[2],mem[2],ymm13[3],mem[3],ymm13[6],mem[6],ymm13[7],mem[7]
11311 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[6],ymm2[6],ymm4[7],ymm2[7]
11312 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11313 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11314 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11315 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11316 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11317 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11318 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,2,3,6,7,6,7]
11319 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3,4],ymm1[5,6],ymm2[7]
11320 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11321 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
11322 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 112(%rdx), %ymm0
11323 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
11324 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm4[3,1,2,0,7,5,6,4]
11325 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
11326 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
11327 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
11328 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
11329 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
11330 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11331 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11332 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[3,3],mem[3,3]
11333 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
11334 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
11335 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6,7]
11336 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
11337 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
11338 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
11339 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11340 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,3,3,3]
11341 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
11342 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11343 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11344 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11345 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,2,3,6,7,6,7]
11346 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0],ymm1[1,2],ymm8[3,4],ymm1[5,6],ymm8[7]
11347 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
11348 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
11349 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 144(%rdx), %ymm1
11350 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11351 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm0[3,1,2,0,7,5,6,4]
11352 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm1[6],ymm8[7]
11353 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11354 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
11355 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm8 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
11356 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5],ymm3[6,7]
11357 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsp), %xmm5 # 16-byte Reload
11358 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm8 # 16-byte Folded Reload
11359 ; AVX2-FAST-PERLANE-NEXT: # xmm8 = xmm5[3,3],mem[3,3]
11360 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
11361 ; AVX2-FAST-PERLANE-NEXT: # xmm8 = xmm8[0,1,2],mem[3]
11362 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm8[1,2,3],ymm3[4,5,6,7]
11363 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
11364 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
11365 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
11366 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[3,3,3,3]
11367 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11368 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3,4,5,6,7]
11369 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11370 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
11371 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11372 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm7 = mem[2,3,2,3,6,7,6,7]
11373 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1,2],ymm7[3,4],ymm8[5,6],ymm7[7]
11374 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3]
11375 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0],ymm0[1,2,3,4],ymm7[5,6,7]
11376 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 176(%rdx), %ymm7
11377 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11378 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm0[3,1,2,0,7,5,6,4]
11379 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6],ymm8[7]
11380 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11381 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
11382 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm8 = ymm1[0],ymm10[0],ymm1[1],ymm10[1],ymm1[4],ymm10[4],ymm1[5],ymm10[5]
11383 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5],ymm7[6,7]
11384 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
11385 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm8 # 16-byte Folded Reload
11386 ; AVX2-FAST-PERLANE-NEXT: # xmm8 = xmm5[3,3],mem[3,3]
11387 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
11388 ; AVX2-FAST-PERLANE-NEXT: # xmm8 = xmm8[0,1,2],mem[3]
11389 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3],ymm7[4,5,6,7]
11390 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
11391 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
11392 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm10[2],ymm1[2],ymm10[3],ymm1[3],ymm10[6],ymm1[6],ymm10[7],ymm1[7]
11393 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[3,3,3,3]
11394 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
11395 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3,4,5,6,7]
11396 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
11397 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
11398 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = ymm1[3,3],mem[3,3],ymm1[7,7],mem[7,7]
11399 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm10 = mem[2,3,2,3,6,7,6,7]
11400 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0],ymm8[1,2],ymm10[3,4],ymm8[5,6],ymm10[7]
11401 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,2,3]
11402 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0],ymm0[1,2,3,4],ymm8[5,6,7]
11403 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm8 # 32-byte Folded Reload
11404 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = ymm11[0],mem[0],ymm11[1],mem[1],ymm11[4],mem[4],ymm11[5],mem[5]
11405 ; AVX2-FAST-PERLANE-NEXT: vpermilps $39, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
11406 ; AVX2-FAST-PERLANE-NEXT: # ymm10 = mem[3,1,2,0,7,5,6,4]
11407 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 208(%rdx), %ymm11
11408 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6],ymm10[7]
11409 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5],ymm10[6,7]
11410 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
11411 ; AVX2-FAST-PERLANE-NEXT: vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm10 # 16-byte Folded Reload
11412 ; AVX2-FAST-PERLANE-NEXT: # xmm10 = xmm1[3,3],mem[3,3]
11413 ; AVX2-FAST-PERLANE-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
11414 ; AVX2-FAST-PERLANE-NEXT: # xmm10 = xmm10[0,1,2],mem[3]
11415 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm10[1,2,3],ymm8[4,5,6,7]
11416 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
11417 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 1440(%rax)
11418 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1312(%rax)
11419 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, 1216(%rax)
11420 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 1088(%rax)
11421 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 992(%rax)
11422 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 864(%rax)
11423 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 768(%rax)
11424 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm13, 640(%rax)
11425 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm15, 544(%rax)
11426 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm14, 416(%rax)
11427 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm12, 320(%rax)
11428 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 192(%rax)
11429 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11430 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 96(%rax)
11431 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11432 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1504(%rax)
11433 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11434 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1472(%rax)
11435 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11436 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1280(%rax)
11437 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11438 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1248(%rax)
11439 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11440 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1056(%rax)
11441 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11442 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1024(%rax)
11443 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11444 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 832(%rax)
11445 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11446 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 800(%rax)
11447 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11448 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 608(%rax)
11449 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11450 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 576(%rax)
11451 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11452 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 384(%rax)
11453 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11454 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 352(%rax)
11455 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11456 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rax)
11457 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11458 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%rax)
11459 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11460 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1760(%rax)
11461 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11462 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1728(%rax)
11463 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11464 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1664(%rax)
11465 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11466 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1536(%rax)
11467 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11468 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1408(%rax)
11469 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11470 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1376(%rax)
11471 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11472 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1344(%rax)
11473 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11474 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1184(%rax)
11475 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11476 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1152(%rax)
11477 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11478 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1120(%rax)
11479 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11480 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 960(%rax)
11481 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11482 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 928(%rax)
11483 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11484 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 896(%rax)
11485 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11486 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 736(%rax)
11487 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11488 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 704(%rax)
11489 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11490 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 672(%rax)
11491 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11492 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 512(%rax)
11493 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11494 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 480(%rax)
11495 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11496 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 448(%rax)
11497 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11498 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 288(%rax)
11499 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11500 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rax)
11501 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11502 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax)
11503 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11504 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rax)
11505 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11506 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
11507 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11508 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
11509 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11510 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1696(%rax)
11511 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11512 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1632(%rax)
11513 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11514 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1600(%rax)
11515 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11516 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1568(%rax)
11517 ; AVX2-FAST-PERLANE-NEXT: addq $3000, %rsp # imm = 0xBB8
11518 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
11519 ; AVX2-FAST-PERLANE-NEXT: retq
11520 ;
11521 ; AVX512F-LABEL: store_i32_stride7_vf64:
11522 ; AVX512F: # %bb.0:
11523 ; AVX512F-NEXT: subq $3016, %rsp # imm = 0xBC8
11524 ; AVX512F-NEXT: vmovdqa64 (%rdx), %zmm2
11525 ; AVX512F-NEXT: vmovdqa64 (%rcx), %zmm0
11526 ; AVX512F-NEXT: vmovdqa64 (%r8), %zmm22
11527 ; AVX512F-NEXT: vmovdqa64 64(%r8), %zmm19
11528 ; AVX512F-NEXT: vmovdqa64 128(%r8), %zmm13
11529 ; AVX512F-NEXT: vmovdqa64 (%r9), %zmm4
11530 ; AVX512F-NEXT: vmovdqa64 64(%r9), %zmm1
11531 ; AVX512F-NEXT: vmovdqa64 128(%r9), %zmm3
11532 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm5 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
11533 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm6
11534 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm5, %zmm6
11535 ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11536 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
11537 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm7
11538 ; AVX512F-NEXT: vpermt2d %zmm4, %zmm6, %zmm7
11539 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11540 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
11541 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm8
11542 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm7, %zmm8
11543 ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11544 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
11545 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm9
11546 ; AVX512F-NEXT: vpermt2d %zmm4, %zmm8, %zmm9
11547 ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11548 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm10
11549 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
11550 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm9
11551 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm4, %zmm9
11552 ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11553 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
11554 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm11
11555 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm9, %zmm11
11556 ; AVX512F-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11557 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm14
11558 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
11559 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm11
11560 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm9, %zmm11
11561 ; AVX512F-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11562 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
11563 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm12
11564 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm11, %zmm12
11565 ; AVX512F-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11566 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm31 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
11567 ; AVX512F-NEXT: vpermt2d %zmm22, %zmm31, %zmm10
11568 ; AVX512F-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11569 ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm10
11570 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm6, %zmm10
11571 ; AVX512F-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11572 ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm10
11573 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm8, %zmm10
11574 ; AVX512F-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11575 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm10
11576 ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm1
11577 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm4, %zmm1
11578 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11579 ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm1
11580 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm14, %zmm1
11581 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11582 ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm1
11583 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm9, %zmm1
11584 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11585 ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm1
11586 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm11, %zmm1
11587 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11588 ; AVX512F-NEXT: vpermt2d %zmm19, %zmm31, %zmm10
11589 ; AVX512F-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11590 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm1
11591 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
11592 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11593 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm1
11594 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm8, %zmm1
11595 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11596 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm1
11597 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm4, %zmm1
11598 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11599 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm13, %zmm14
11600 ; AVX512F-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11601 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm13, %zmm9
11602 ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11603 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm13, %zmm11
11604 ; AVX512F-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11605 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm31, %zmm3
11606 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11607 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
11608 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3
11609 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm2
11610 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11611 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
11612 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm2
11613 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm10, %zmm2
11614 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11615 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
11616 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm2
11617 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm21, %zmm2
11618 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11619 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm31 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
11620 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2
11621 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm31, %zmm2
11622 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11623 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
11624 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm2, %zmm3
11625 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11626 ; AVX512F-NEXT: vmovdqa64 64(%rdx), %zmm17
11627 ; AVX512F-NEXT: vmovdqa64 64(%rcx), %zmm0
11628 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm3
11629 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm5, %zmm3
11630 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11631 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm3
11632 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm7, %zmm3
11633 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11634 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm3
11635 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm3
11636 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11637 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm3
11638 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm10, %zmm3
11639 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11640 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm3
11641 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm21, %zmm3
11642 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11643 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3
11644 ; AVX512F-NEXT: vpermt2d %zmm17, %zmm31, %zmm3
11645 ; AVX512F-NEXT: vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
11646 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm2, %zmm17
11647 ; AVX512F-NEXT: vmovdqa64 192(%rdx), %zmm25
11648 ; AVX512F-NEXT: vmovdqa64 192(%rcx), %zmm0
11649 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm3
11650 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm10, %zmm3
11651 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11652 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm3
11653 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm21, %zmm3
11654 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11655 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3
11656 ; AVX512F-NEXT: vpermt2d %zmm25, %zmm31, %zmm3
11657 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11658 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm3
11659 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm2, %zmm3
11660 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11661 ; AVX512F-NEXT: vmovdqa64 128(%rdx), %zmm23
11662 ; AVX512F-NEXT: vmovdqa64 128(%rcx), %zmm3
11663 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm23, %zmm10
11664 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm23, %zmm21
11665 ; AVX512F-NEXT: vpermi2d %zmm23, %zmm3, %zmm31
11666 ; AVX512F-NEXT: vmovdqa64 %zmm23, %zmm8
11667 ; AVX512F-NEXT: vmovdqa64 %zmm23, %zmm6
11668 ; AVX512F-NEXT: vmovdqa64 %zmm23, %zmm4
11669 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm2, %zmm23
11670 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm5, %zmm8
11671 ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11672 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm7, %zmm6
11673 ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11674 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm1, %zmm4
11675 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11676 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm25, %zmm5
11677 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11678 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm25, %zmm7
11679 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11680 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm25
11681 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm24
11682 ; AVX512F-NEXT: vmovdqa64 (%rsi), %zmm0
11683 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
11684 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm1
11685 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm9, %zmm1
11686 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11687 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
11688 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm1
11689 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm15, %zmm1
11690 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11691 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm29 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
11692 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm1
11693 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm29, %zmm1
11694 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11695 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
11696 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm1
11697 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm8, %zmm1
11698 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11699 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
11700 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm28
11701 ; AVX512F-NEXT: vpermt2d %zmm24, %zmm6, %zmm28
11702 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
11703 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm3
11704 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm2, %zmm3
11705 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm30 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
11706 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm30, %zmm24
11707 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm5
11708 ; AVX512F-NEXT: vmovdqa64 64(%rsi), %zmm0
11709 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm27
11710 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm9, %zmm27
11711 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm1
11712 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm15, %zmm1
11713 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11714 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm1
11715 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm29, %zmm1
11716 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11717 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm26
11718 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm8, %zmm26
11719 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm19
11720 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm6, %zmm19
11721 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm20
11722 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm2, %zmm20
11723 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm30, %zmm5
11724 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm12
11725 ; AVX512F-NEXT: vmovdqa64 192(%rsi), %zmm11
11726 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm1
11727 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm8, %zmm1
11728 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm14
11729 ; AVX512F-NEXT: vpermt2d %zmm12, %zmm6, %zmm14
11730 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm7
11731 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm2, %zmm7
11732 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm13
11733 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm30, %zmm13
11734 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm16
11735 ; AVX512F-NEXT: vmovdqa64 128(%rsi), %zmm0
11736 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm16, %zmm8
11737 ; AVX512F-NEXT: vpermi2d %zmm16, %zmm0, %zmm6
11738 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm16, %zmm2
11739 ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm18
11740 ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm4
11741 ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm22
11742 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm30, %zmm16
11743 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm9, %zmm18
11744 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm15, %zmm4
11745 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm29, %zmm22
11746 ; AVX512F-NEXT: vpermi2d %zmm11, %zmm12, %zmm9
11747 ; AVX512F-NEXT: vpermi2d %zmm11, %zmm12, %zmm15
11748 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm29, %zmm12
11749 ; AVX512F-NEXT: movw $3096, %ax # imm = 0xC18
11750 ; AVX512F-NEXT: kmovw %eax, %k1
11751 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11752 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
11753 ; AVX512F-NEXT: movw $-31994, %ax # imm = 0x8306
11754 ; AVX512F-NEXT: kmovw %eax, %k2
11755 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11756 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
11757 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11758 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
11759 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
11760 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11761 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm26 {%k1}
11762 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm8 {%k1}
11763 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
11764 ; AVX512F-NEXT: vmovdqa64 (%rax), %zmm0
11765 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
11766 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
11767 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm10, %zmm11
11768 ; AVX512F-NEXT: movw $28897, %cx # imm = 0x70E1
11769 ; AVX512F-NEXT: kmovw %ecx, %k1
11770 ; AVX512F-NEXT: vmovdqa32 %zmm11, %zmm29 {%k1}
11771 ; AVX512F-NEXT: vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11772 ; AVX512F-NEXT: vmovdqa64 64(%rax), %zmm11
11773 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
11774 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm10, %zmm29
11775 ; AVX512F-NEXT: vmovdqa32 %zmm29, %zmm26 {%k1}
11776 ; AVX512F-NEXT: vmovdqa64 128(%rax), %zmm30
11777 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
11778 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm10, %zmm29
11779 ; AVX512F-NEXT: vmovdqa32 %zmm29, %zmm8 {%k1}
11780 ; AVX512F-NEXT: movw $6192, %cx # imm = 0x1830
11781 ; AVX512F-NEXT: kmovw %ecx, %k1
11782 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
11783 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm29 {%k1}
11784 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11785 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm13 {%k1}
11786 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11787 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm28 {%k2}
11788 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11789 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm19 {%k2}
11790 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm6 {%k2}
11791 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
11792 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
11793 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm7, %zmm10
11794 ; AVX512F-NEXT: movw $7224, %cx # imm = 0x1C38
11795 ; AVX512F-NEXT: kmovw %ecx, %k2
11796 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm28 {%k2}
11797 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
11798 ; AVX512F-NEXT: vmovdqa32 %zmm3, %zmm21 {%k1}
11799 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
11800 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm7, %zmm10
11801 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm19 {%k2}
11802 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
11803 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm7, %zmm10
11804 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm6 {%k2}
11805 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
11806 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
11807 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm7, %zmm10
11808 ; AVX512F-NEXT: movw $-30962, %cx # imm = 0x870E
11809 ; AVX512F-NEXT: kmovw %ecx, %k2
11810 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm21 {%k2}
11811 ; AVX512F-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11812 ; AVX512F-NEXT: vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
11813 ; AVX512F-NEXT: vmovdqa32 %zmm20, %zmm3 {%k1}
11814 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
11815 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm7, %zmm10
11816 ; AVX512F-NEXT: vmovdqa32 %zmm10, %zmm3 {%k2}
11817 ; AVX512F-NEXT: vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
11818 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm31 {%k1}
11819 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
11820 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm7, %zmm2
11821 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm31 {%k2}
11822 ; AVX512F-NEXT: movw $1548, %cx # imm = 0x60C
11823 ; AVX512F-NEXT: kmovw %ecx, %k2
11824 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
11825 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11826 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
11827 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
11828 ; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm24 {%k1}
11829 ; AVX512F-NEXT: vmovdqa32 %zmm17, %zmm5 {%k1}
11830 ; AVX512F-NEXT: vmovdqa32 %zmm23, %zmm16 {%k1}
11831 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,29,u,u,u,u,6,7,30,u,u,u,u,13,14,31>
11832 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11833 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm2, %zmm7
11834 ; AVX512F-NEXT: movw $-7741, %cx # imm = 0xE1C3
11835 ; AVX512F-NEXT: kmovw %ecx, %k1
11836 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm24 {%k1}
11837 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11838 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm2, %zmm7
11839 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm5 {%k1}
11840 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11841 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm2, %zmm7
11842 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm16 {%k1}
11843 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
11844 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11845 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm2, %zmm7
11846 ; AVX512F-NEXT: movw $14448, %cx # imm = 0x3870
11847 ; AVX512F-NEXT: kmovw %ecx, %k3
11848 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm3 {%k3}
11849 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11850 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11851 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm27 {%k2}
11852 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11853 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm18 {%k2}
11854 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11855 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm9 {%k2}
11856 ; AVX512F-NEXT: movw $12384, %cx # imm = 0x3060
11857 ; AVX512F-NEXT: kmovw %ecx, %k1
11858 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
11859 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11860 ; AVX512F-NEXT: vmovdqa32 %zmm3, %zmm20 {%k1}
11861 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11862 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm2, %zmm7
11863 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm27 {%k3}
11864 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
11865 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm2, %zmm7
11866 ; AVX512F-NEXT: vmovdqa32 %zmm7, %zmm18 {%k3}
11867 ; AVX512F-NEXT: vmovdqa64 192(%r8), %zmm7
11868 ; AVX512F-NEXT: vmovdqa64 192(%r9), %zmm10
11869 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
11870 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm7, %zmm21
11871 ; AVX512F-NEXT: vmovdqa64 192(%rax), %zmm17
11872 ; AVX512F-NEXT: vpermt2d %zmm17, %zmm2, %zmm21
11873 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm9 {%k3}
11874 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
11875 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
11876 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm2, %zmm21
11877 ; AVX512F-NEXT: movw $3612, %ax # imm = 0xE1C
11878 ; AVX512F-NEXT: kmovw %eax, %k2
11879 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm20 {%k2}
11880 ; AVX512F-NEXT: vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11881 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
11882 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11883 ; AVX512F-NEXT: vmovdqa32 %zmm3, %zmm20 {%k1}
11884 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
11885 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm2, %zmm21
11886 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm20 {%k2}
11887 ; AVX512F-NEXT: vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11888 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
11889 ; AVX512F-NEXT: vmovdqa32 %zmm4, %zmm20 {%k1}
11890 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
11891 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm2, %zmm21
11892 ; AVX512F-NEXT: vmovdqa32 %zmm21, %zmm20 {%k2}
11893 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
11894 ; AVX512F-NEXT: vmovdqa32 %zmm15, %zmm21 {%k1}
11895 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
11896 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm7, %zmm15
11897 ; AVX512F-NEXT: vpermt2d %zmm17, %zmm2, %zmm15
11898 ; AVX512F-NEXT: vmovdqa32 %zmm15, %zmm21 {%k2}
11899 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,23,u,u,8,9,10,11,24,u,u,15>
11900 ; AVX512F-NEXT: vpermi2d %zmm7, %zmm1, %zmm2
11901 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,1,2,25,u,u,6,7,8,9,26,u,u,13,14,15>
11902 ; AVX512F-NEXT: vpermi2d %zmm7, %zmm14, %zmm3
11903 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm14 = <0,27,u,u,4,5,6,7,28,u,u,11,12,13,14,29>
11904 ; AVX512F-NEXT: vpermi2d %zmm7, %zmm29, %zmm14
11905 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = <u,u,2,3,4,5,30,u,u,9,10,11,12,31,u,u>
11906 ; AVX512F-NEXT: vpermi2d %zmm7, %zmm13, %zmm15
11907 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
11908 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
11909 ; AVX512F-NEXT: vmovdqa32 %zmm13, %zmm29 {%k1}
11910 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
11911 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
11912 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm13, %zmm23
11913 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11914 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11915 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
11916 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11917 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
11918 ; AVX512F-NEXT: vmovdqa32 %zmm25, %zmm12 {%k1}
11919 ; AVX512F-NEXT: movw $15480, %ax # imm = 0x3C78
11920 ; AVX512F-NEXT: kmovw %eax, %k1
11921 ; AVX512F-NEXT: vmovdqa32 %zmm29, %zmm23 {%k1}
11922 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11923 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm13, %zmm0
11924 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
11925 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm25
11926 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
11927 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm13, %zmm0
11928 ; AVX512F-NEXT: vmovdqa32 %zmm22, %zmm0 {%k1}
11929 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm4
11930 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
11931 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm0, %zmm7
11932 ; AVX512F-NEXT: vpermt2d %zmm17, %zmm13, %zmm7
11933 ; AVX512F-NEXT: vmovdqa32 %zmm12, %zmm7 {%k1}
11934 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,1,2,3,4,5,23,u,8,9,10,11,12,24,u,15>
11935 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm2, %zmm0
11936 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,25,u,6,7,8,9,10,26,u,13,14,15>
11937 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm3, %zmm1
11938 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,27,u,4,5,6,7,8,28,u,11,12,13,14,15>
11939 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm14, %zmm2
11940 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm11 = <29,u,2,3,4,5,6,30,u,9,10,11,12,13,31,u>
11941 ; AVX512F-NEXT: vpermi2d %zmm10, %zmm15, %zmm11
11942 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm10 = [22,1,2,3,4,5,6,23,8,9,10,11,12,13,24,15]
11943 ; AVX512F-NEXT: vpermi2d %zmm17, %zmm0, %zmm10
11944 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,25,6,7,8,9,10,11,26,13,14,15]
11945 ; AVX512F-NEXT: vpermi2d %zmm17, %zmm1, %zmm0
11946 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,27,4,5,6,7,8,9,28,11,12,13,14,15]
11947 ; AVX512F-NEXT: vpermi2d %zmm17, %zmm2, %zmm1
11948 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
11949 ; AVX512F-NEXT: vpermi2d %zmm17, %zmm11, %zmm2
11950 ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax
11951 ; AVX512F-NEXT: vmovdqa64 %zmm7, 1472(%rax)
11952 ; AVX512F-NEXT: vmovdqa64 %zmm21, 1408(%rax)
11953 ; AVX512F-NEXT: vmovdqa64 %zmm9, 1344(%rax)
11954 ; AVX512F-NEXT: vmovdqa64 %zmm16, 1280(%rax)
11955 ; AVX512F-NEXT: vmovdqa64 %zmm31, 1216(%rax)
11956 ; AVX512F-NEXT: vmovdqa64 %zmm6, 1152(%rax)
11957 ; AVX512F-NEXT: vmovdqa64 %zmm8, 1088(%rax)
11958 ; AVX512F-NEXT: vmovdqa64 %zmm4, 1024(%rax)
11959 ; AVX512F-NEXT: vmovdqa64 %zmm20, 960(%rax)
11960 ; AVX512F-NEXT: vmovdqa64 %zmm18, 896(%rax)
11961 ; AVX512F-NEXT: vmovdqa64 %zmm5, 832(%rax)
11962 ; AVX512F-NEXT: vmovups (%rsp), %zmm3 # 64-byte Reload
11963 ; AVX512F-NEXT: vmovaps %zmm3, 768(%rax)
11964 ; AVX512F-NEXT: vmovdqa64 %zmm19, 704(%rax)
11965 ; AVX512F-NEXT: vmovdqa64 %zmm26, 640(%rax)
11966 ; AVX512F-NEXT: vmovdqa64 %zmm25, 576(%rax)
11967 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11968 ; AVX512F-NEXT: vmovaps %zmm3, 512(%rax)
11969 ; AVX512F-NEXT: vmovdqa64 %zmm27, 448(%rax)
11970 ; AVX512F-NEXT: vmovdqa64 %zmm24, 384(%rax)
11971 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11972 ; AVX512F-NEXT: vmovaps %zmm3, 320(%rax)
11973 ; AVX512F-NEXT: vmovdqa64 %zmm28, 256(%rax)
11974 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11975 ; AVX512F-NEXT: vmovaps %zmm3, 192(%rax)
11976 ; AVX512F-NEXT: vmovdqa64 %zmm23, 128(%rax)
11977 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11978 ; AVX512F-NEXT: vmovaps %zmm3, 64(%rax)
11979 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11980 ; AVX512F-NEXT: vmovaps %zmm3, (%rax)
11981 ; AVX512F-NEXT: vmovdqa64 %zmm2, 1728(%rax)
11982 ; AVX512F-NEXT: vmovdqa64 %zmm1, 1664(%rax)
11983 ; AVX512F-NEXT: vmovdqa64 %zmm0, 1600(%rax)
11984 ; AVX512F-NEXT: vmovdqa64 %zmm10, 1536(%rax)
11985 ; AVX512F-NEXT: addq $3016, %rsp # imm = 0xBC8
11986 ; AVX512F-NEXT: vzeroupper
11987 ; AVX512F-NEXT: retq
11988 ;
11989 ; AVX512BW-LABEL: store_i32_stride7_vf64:
11990 ; AVX512BW: # %bb.0:
11991 ; AVX512BW-NEXT: subq $3016, %rsp # imm = 0xBC8
11992 ; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm2
11993 ; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm0
11994 ; AVX512BW-NEXT: vmovdqa64 (%r8), %zmm22
11995 ; AVX512BW-NEXT: vmovdqa64 64(%r8), %zmm19
11996 ; AVX512BW-NEXT: vmovdqa64 128(%r8), %zmm13
11997 ; AVX512BW-NEXT: vmovdqa64 (%r9), %zmm4
11998 ; AVX512BW-NEXT: vmovdqa64 64(%r9), %zmm1
11999 ; AVX512BW-NEXT: vmovdqa64 128(%r9), %zmm3
12000 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = <u,u,0,16,u,u,u,u,u,1,17,u,u,u,u,u>
12001 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm6
12002 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm5, %zmm6
12003 ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12004 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
12005 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm7
12006 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm6, %zmm7
12007 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12008 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <2,18,u,u,u,u,u,3,19,u,u,u,u,u,4,20>
12009 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm8
12010 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm7, %zmm8
12011 ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12012 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
12013 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm9
12014 ; AVX512BW-NEXT: vpermt2d %zmm4, %zmm8, %zmm9
12015 ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12016 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm10
12017 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
12018 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm9
12019 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm4, %zmm9
12020 ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12021 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,u,u,u,7,23,u,u,u,u,u,8,24,u,u>
12022 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm11
12023 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm9, %zmm11
12024 ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12025 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm14
12026 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,u,u,9,25,u,u,u,u,u,10,26,u,u,u,u>
12027 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm11
12028 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm9, %zmm11
12029 ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12030 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <u,11,27,u,u,u,u,u,12,28,u,u,u,u,u,13>
12031 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm12
12032 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm11, %zmm12
12033 ; AVX512BW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12034 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm31 = <13,u,u,u,u,u,30,14,u,u,u,u,u,31,15,u>
12035 ; AVX512BW-NEXT: vpermt2d %zmm22, %zmm31, %zmm10
12036 ; AVX512BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12037 ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm10
12038 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm6, %zmm10
12039 ; AVX512BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12040 ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm10
12041 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm8, %zmm10
12042 ; AVX512BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12043 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm10
12044 ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm1
12045 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm4, %zmm1
12046 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12047 ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm1
12048 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm14, %zmm1
12049 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12050 ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm1
12051 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm9, %zmm1
12052 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12053 ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm1
12054 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm11, %zmm1
12055 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12056 ; AVX512BW-NEXT: vpermt2d %zmm19, %zmm31, %zmm10
12057 ; AVX512BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12058 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm1
12059 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
12060 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12061 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm1
12062 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm8, %zmm1
12063 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12064 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm1
12065 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm4, %zmm1
12066 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12067 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm13, %zmm14
12068 ; AVX512BW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12069 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm13, %zmm9
12070 ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12071 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm13, %zmm11
12072 ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12073 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm31, %zmm3
12074 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12075 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <u,u,u,u,u,5,21,u,u,u,u,u,6,22,u,u>
12076 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3
12077 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm1, %zmm2
12078 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12079 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <u,u,u,7,23,u,u,u,u,u,8,24,u,u,u,u>
12080 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm2
12081 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm10, %zmm2
12082 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12083 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,9,25,u,u,u,u,u,10,26,u,u,u,u,u,11>
12084 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm2
12085 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm21, %zmm2
12086 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12087 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm31 = <11,u,u,u,u,u,28,12,u,u,u,u,u,29,13,u>
12088 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2
12089 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm31, %zmm2
12090 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12091 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,14,30,u,u,u,u,u,15,31,u,u,u>
12092 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm3
12093 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12094 ; AVX512BW-NEXT: vmovdqa64 64(%rdx), %zmm17
12095 ; AVX512BW-NEXT: vmovdqa64 64(%rcx), %zmm0
12096 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm3
12097 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm5, %zmm3
12098 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12099 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm3
12100 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm7, %zmm3
12101 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12102 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm3
12103 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm1, %zmm3
12104 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12105 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm3
12106 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm10, %zmm3
12107 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12108 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm3
12109 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm21, %zmm3
12110 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12111 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3
12112 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm31, %zmm3
12113 ; AVX512BW-NEXT: vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
12114 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm17
12115 ; AVX512BW-NEXT: vmovdqa64 192(%rdx), %zmm25
12116 ; AVX512BW-NEXT: vmovdqa64 192(%rcx), %zmm0
12117 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm3
12118 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm10, %zmm3
12119 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12120 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm3
12121 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm21, %zmm3
12122 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12123 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3
12124 ; AVX512BW-NEXT: vpermt2d %zmm25, %zmm31, %zmm3
12125 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12126 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm3
12127 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm3
12128 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12129 ; AVX512BW-NEXT: vmovdqa64 128(%rdx), %zmm23
12130 ; AVX512BW-NEXT: vmovdqa64 128(%rcx), %zmm3
12131 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm23, %zmm10
12132 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm23, %zmm21
12133 ; AVX512BW-NEXT: vpermi2d %zmm23, %zmm3, %zmm31
12134 ; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm8
12135 ; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm6
12136 ; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm4
12137 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm2, %zmm23
12138 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm5, %zmm8
12139 ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12140 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm7, %zmm6
12141 ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12142 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm1, %zmm4
12143 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12144 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm25, %zmm5
12145 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12146 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm25, %zmm7
12147 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12148 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm1, %zmm25
12149 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm24
12150 ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
12151 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <0,16,u,u,u,u,u,1,17,u,u,u,u,u,2,18>
12152 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm1
12153 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm9, %zmm1
12154 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12155 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = <u,u,u,u,u,3,19,u,u,u,u,u,4,20,u,u>
12156 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm1
12157 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm15, %zmm1
12158 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12159 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm29 = <u,u,u,5,21,u,u,u,u,u,6,22,u,u,u,u>
12160 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm1
12161 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm29, %zmm1
12162 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12163 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <u,7,23,u,u,u,u,u,8,24,u,u,u,u,u,9>
12164 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm1
12165 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm8, %zmm1
12166 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12167 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <9,u,u,u,u,u,26,10,u,u,u,u,u,27,11,u>
12168 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm28
12169 ; AVX512BW-NEXT: vpermt2d %zmm24, %zmm6, %zmm28
12170 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,12,28,u,u,u,u,u,13,29,u,u,u>
12171 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm3
12172 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm3
12173 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm30 = <u,u,14,30,u,u,u,u,u,15,31,u,u,u,u,u>
12174 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm30, %zmm24
12175 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm5
12176 ; AVX512BW-NEXT: vmovdqa64 64(%rsi), %zmm0
12177 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm27
12178 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm9, %zmm27
12179 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm1
12180 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm15, %zmm1
12181 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12182 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm1
12183 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm29, %zmm1
12184 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12185 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm26
12186 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm8, %zmm26
12187 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm19
12188 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm6, %zmm19
12189 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm20
12190 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm20
12191 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm30, %zmm5
12192 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm12
12193 ; AVX512BW-NEXT: vmovdqa64 192(%rsi), %zmm11
12194 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm1
12195 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm8, %zmm1
12196 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm14
12197 ; AVX512BW-NEXT: vpermt2d %zmm12, %zmm6, %zmm14
12198 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm7
12199 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm2, %zmm7
12200 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm13
12201 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm30, %zmm13
12202 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm16
12203 ; AVX512BW-NEXT: vmovdqa64 128(%rsi), %zmm0
12204 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm16, %zmm8
12205 ; AVX512BW-NEXT: vpermi2d %zmm16, %zmm0, %zmm6
12206 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm16, %zmm2
12207 ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm18
12208 ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm4
12209 ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm22
12210 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm30, %zmm16
12211 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm9, %zmm18
12212 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm15, %zmm4
12213 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm29, %zmm22
12214 ; AVX512BW-NEXT: vpermi2d %zmm11, %zmm12, %zmm9
12215 ; AVX512BW-NEXT: vpermi2d %zmm11, %zmm12, %zmm15
12216 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm29, %zmm12
12217 ; AVX512BW-NEXT: movw $3096, %ax # imm = 0xC18
12218 ; AVX512BW-NEXT: kmovd %eax, %k1
12219 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12220 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
12221 ; AVX512BW-NEXT: movw $-31994, %ax # imm = 0x8306
12222 ; AVX512BW-NEXT: kmovd %eax, %k2
12223 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12224 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm14 {%k2}
12225 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12226 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
12227 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm29 {%k1}
12228 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12229 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm26 {%k1}
12230 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm8 {%k1}
12231 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
12232 ; AVX512BW-NEXT: vmovdqa64 (%rax), %zmm0
12233 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = <22,u,u,u,u,5,6,23,u,u,u,u,12,13,24,u>
12234 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
12235 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm10, %zmm11
12236 ; AVX512BW-NEXT: movw $28897, %cx # imm = 0x70E1
12237 ; AVX512BW-NEXT: kmovd %ecx, %k1
12238 ; AVX512BW-NEXT: vmovdqa32 %zmm11, %zmm29 {%k1}
12239 ; AVX512BW-NEXT: vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12240 ; AVX512BW-NEXT: vmovdqa64 64(%rax), %zmm11
12241 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
12242 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm10, %zmm29
12243 ; AVX512BW-NEXT: vmovdqa32 %zmm29, %zmm26 {%k1}
12244 ; AVX512BW-NEXT: vmovdqa64 128(%rax), %zmm30
12245 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
12246 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm10, %zmm29
12247 ; AVX512BW-NEXT: vmovdqa32 %zmm29, %zmm8 {%k1}
12248 ; AVX512BW-NEXT: movw $6192, %cx # imm = 0x1830
12249 ; AVX512BW-NEXT: kmovd %ecx, %k1
12250 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
12251 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm29 {%k1}
12252 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12253 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm13 {%k1}
12254 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12255 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm28 {%k2}
12256 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12257 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm19 {%k2}
12258 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm6 {%k2}
12259 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,u,3,4,25,u,u,u,u,10,11,26,u,u,u>
12260 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
12261 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm7, %zmm10
12262 ; AVX512BW-NEXT: movw $7224, %cx # imm = 0x1C38
12263 ; AVX512BW-NEXT: kmovd %ecx, %k2
12264 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm28 {%k2}
12265 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
12266 ; AVX512BW-NEXT: vmovdqa32 %zmm3, %zmm21 {%k1}
12267 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
12268 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm7, %zmm10
12269 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm19 {%k2}
12270 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
12271 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm7, %zmm10
12272 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm6 {%k2}
12273 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,1,2,27,u,u,u,u,8,9,28,u,u,u,u,15>
12274 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
12275 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm7, %zmm10
12276 ; AVX512BW-NEXT: movw $-30962, %cx # imm = 0x870E
12277 ; AVX512BW-NEXT: kmovd %ecx, %k2
12278 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm21 {%k2}
12279 ; AVX512BW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12280 ; AVX512BW-NEXT: vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
12281 ; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm3 {%k1}
12282 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
12283 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm7, %zmm10
12284 ; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm3 {%k2}
12285 ; AVX512BW-NEXT: vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
12286 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm31 {%k1}
12287 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
12288 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm7, %zmm2
12289 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm31 {%k2}
12290 ; AVX512BW-NEXT: movw $1548, %cx # imm = 0x60C
12291 ; AVX512BW-NEXT: kmovd %ecx, %k2
12292 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
12293 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12294 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm3 {%k2}
12295 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
12296 ; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm24 {%k1}
12297 ; AVX512BW-NEXT: vmovdqa32 %zmm17, %zmm5 {%k1}
12298 ; AVX512BW-NEXT: vmovdqa32 %zmm23, %zmm16 {%k1}
12299 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,29,u,u,u,u,6,7,30,u,u,u,u,13,14,31>
12300 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12301 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm7
12302 ; AVX512BW-NEXT: movw $-7741, %cx # imm = 0xE1C3
12303 ; AVX512BW-NEXT: kmovd %ecx, %k1
12304 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm24 {%k1}
12305 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12306 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm2, %zmm7
12307 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm5 {%k1}
12308 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12309 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm2, %zmm7
12310 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm16 {%k1}
12311 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,4,5,16,u,u,u,u,11,12,17,u,u>
12312 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12313 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm7
12314 ; AVX512BW-NEXT: movw $14448, %cx # imm = 0x3870
12315 ; AVX512BW-NEXT: kmovd %ecx, %k3
12316 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm3 {%k3}
12317 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12318 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12319 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm27 {%k2}
12320 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12321 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm18 {%k2}
12322 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12323 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm9 {%k2}
12324 ; AVX512BW-NEXT: movw $12384, %cx # imm = 0x3060
12325 ; AVX512BW-NEXT: kmovd %ecx, %k1
12326 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
12327 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12328 ; AVX512BW-NEXT: vmovdqa32 %zmm3, %zmm20 {%k1}
12329 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12330 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm2, %zmm7
12331 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm27 {%k3}
12332 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
12333 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm2, %zmm7
12334 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm18 {%k3}
12335 ; AVX512BW-NEXT: vmovdqa64 192(%r8), %zmm7
12336 ; AVX512BW-NEXT: vmovdqa64 192(%r9), %zmm10
12337 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm21 = <u,u,u,u,0,16,u,u,u,u,u,1,17,u,u,u>
12338 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm7, %zmm21
12339 ; AVX512BW-NEXT: vmovdqa64 192(%rax), %zmm17
12340 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm2, %zmm21
12341 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm9 {%k3}
12342 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,2,3,18,u,u,u,u,9,10,19,u,u,u,u>
12343 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
12344 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm2, %zmm21
12345 ; AVX512BW-NEXT: movw $3612, %ax # imm = 0xE1C
12346 ; AVX512BW-NEXT: kmovd %eax, %k2
12347 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm20 {%k2}
12348 ; AVX512BW-NEXT: vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12349 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
12350 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12351 ; AVX512BW-NEXT: vmovdqa32 %zmm3, %zmm20 {%k1}
12352 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
12353 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm2, %zmm21
12354 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm20 {%k2}
12355 ; AVX512BW-NEXT: vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
12356 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
12357 ; AVX512BW-NEXT: vmovdqa32 %zmm4, %zmm20 {%k1}
12358 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
12359 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm2, %zmm21
12360 ; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm20 {%k2}
12361 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
12362 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm21 {%k1}
12363 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = <u,u,2,18,u,u,u,u,u,3,19,u,u,u,u,u>
12364 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm7, %zmm15
12365 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm2, %zmm15
12366 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm21 {%k2}
12367 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,23,u,u,8,9,10,11,24,u,u,15>
12368 ; AVX512BW-NEXT: vpermi2d %zmm7, %zmm1, %zmm2
12369 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,1,2,25,u,u,6,7,8,9,26,u,u,13,14,15>
12370 ; AVX512BW-NEXT: vpermi2d %zmm7, %zmm14, %zmm3
12371 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm14 = <0,27,u,u,4,5,6,7,28,u,u,11,12,13,14,29>
12372 ; AVX512BW-NEXT: vpermi2d %zmm7, %zmm29, %zmm14
12373 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = <u,u,2,3,4,5,30,u,u,9,10,11,12,31,u,u>
12374 ; AVX512BW-NEXT: vpermi2d %zmm7, %zmm13, %zmm15
12375 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
12376 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
12377 ; AVX512BW-NEXT: vmovdqa32 %zmm13, %zmm29 {%k1}
12378 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = <0,1,20,u,u,u,u,7,8,21,u,u,u,u,14,15>
12379 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
12380 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm13, %zmm23
12381 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12382 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
12383 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
12384 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12385 ; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm22 {%k1}
12386 ; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm12 {%k1}
12387 ; AVX512BW-NEXT: movw $15480, %ax # imm = 0x3C78
12388 ; AVX512BW-NEXT: kmovd %eax, %k1
12389 ; AVX512BW-NEXT: vmovdqa32 %zmm29, %zmm23 {%k1}
12390 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12391 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm13, %zmm0
12392 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
12393 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm25
12394 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
12395 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm13, %zmm0
12396 ; AVX512BW-NEXT: vmovdqa32 %zmm22, %zmm0 {%k1}
12397 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4
12398 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <4,20,u,u,u,u,u,5,21,u,u,u,u,u,6,22>
12399 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm0, %zmm7
12400 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm13, %zmm7
12401 ; AVX512BW-NEXT: vmovdqa32 %zmm12, %zmm7 {%k1}
12402 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,1,2,3,4,5,23,u,8,9,10,11,12,24,u,15>
12403 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm2, %zmm0
12404 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,25,u,6,7,8,9,10,26,u,13,14,15>
12405 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm3, %zmm1
12406 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,27,u,4,5,6,7,8,28,u,11,12,13,14,15>
12407 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm14, %zmm2
12408 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <29,u,2,3,4,5,6,30,u,9,10,11,12,13,31,u>
12409 ; AVX512BW-NEXT: vpermi2d %zmm10, %zmm15, %zmm11
12410 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = [22,1,2,3,4,5,6,23,8,9,10,11,12,13,24,15]
12411 ; AVX512BW-NEXT: vpermi2d %zmm17, %zmm0, %zmm10
12412 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,25,6,7,8,9,10,11,26,13,14,15]
12413 ; AVX512BW-NEXT: vpermi2d %zmm17, %zmm1, %zmm0
12414 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,27,4,5,6,7,8,9,28,11,12,13,14,15]
12415 ; AVX512BW-NEXT: vpermi2d %zmm17, %zmm2, %zmm1
12416 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,29,2,3,4,5,6,7,30,9,10,11,12,13,14,31]
12417 ; AVX512BW-NEXT: vpermi2d %zmm17, %zmm11, %zmm2
12418 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
12419 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 1472(%rax)
12420 ; AVX512BW-NEXT: vmovdqa64 %zmm21, 1408(%rax)
12421 ; AVX512BW-NEXT: vmovdqa64 %zmm9, 1344(%rax)
12422 ; AVX512BW-NEXT: vmovdqa64 %zmm16, 1280(%rax)
12423 ; AVX512BW-NEXT: vmovdqa64 %zmm31, 1216(%rax)
12424 ; AVX512BW-NEXT: vmovdqa64 %zmm6, 1152(%rax)
12425 ; AVX512BW-NEXT: vmovdqa64 %zmm8, 1088(%rax)
12426 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 1024(%rax)
12427 ; AVX512BW-NEXT: vmovdqa64 %zmm20, 960(%rax)
12428 ; AVX512BW-NEXT: vmovdqa64 %zmm18, 896(%rax)
12429 ; AVX512BW-NEXT: vmovdqa64 %zmm5, 832(%rax)
12430 ; AVX512BW-NEXT: vmovups (%rsp), %zmm3 # 64-byte Reload
12431 ; AVX512BW-NEXT: vmovaps %zmm3, 768(%rax)
12432 ; AVX512BW-NEXT: vmovdqa64 %zmm19, 704(%rax)
12433 ; AVX512BW-NEXT: vmovdqa64 %zmm26, 640(%rax)
12434 ; AVX512BW-NEXT: vmovdqa64 %zmm25, 576(%rax)
12435 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12436 ; AVX512BW-NEXT: vmovaps %zmm3, 512(%rax)
12437 ; AVX512BW-NEXT: vmovdqa64 %zmm27, 448(%rax)
12438 ; AVX512BW-NEXT: vmovdqa64 %zmm24, 384(%rax)
12439 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12440 ; AVX512BW-NEXT: vmovaps %zmm3, 320(%rax)
12441 ; AVX512BW-NEXT: vmovdqa64 %zmm28, 256(%rax)
12442 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12443 ; AVX512BW-NEXT: vmovaps %zmm3, 192(%rax)
12444 ; AVX512BW-NEXT: vmovdqa64 %zmm23, 128(%rax)
12445 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12446 ; AVX512BW-NEXT: vmovaps %zmm3, 64(%rax)
12447 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
12448 ; AVX512BW-NEXT: vmovaps %zmm3, (%rax)
12449 ; AVX512BW-NEXT: vmovdqa64 %zmm2, 1728(%rax)
12450 ; AVX512BW-NEXT: vmovdqa64 %zmm1, 1664(%rax)
12451 ; AVX512BW-NEXT: vmovdqa64 %zmm0, 1600(%rax)
12452 ; AVX512BW-NEXT: vmovdqa64 %zmm10, 1536(%rax)
12453 ; AVX512BW-NEXT: addq $3016, %rsp # imm = 0xBC8
12454 ; AVX512BW-NEXT: vzeroupper
12455 ; AVX512BW-NEXT: retq
12456 %in.vec0 = load <64 x i32>, ptr %in.vecptr0, align 64
12457 %in.vec1 = load <64 x i32>, ptr %in.vecptr1, align 64
12458 %in.vec2 = load <64 x i32>, ptr %in.vecptr2, align 64
12459 %in.vec3 = load <64 x i32>, ptr %in.vecptr3, align 64
12460 %in.vec4 = load <64 x i32>, ptr %in.vecptr4, align 64
12461 %in.vec5 = load <64 x i32>, ptr %in.vecptr5, align 64
12462 %in.vec6 = load <64 x i32>, ptr %in.vecptr6, align 64
12463 %1 = shufflevector <64 x i32> %in.vec0, <64 x i32> %in.vec1, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
12464 %2 = shufflevector <64 x i32> %in.vec2, <64 x i32> %in.vec3, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
12465 %3 = shufflevector <64 x i32> %in.vec4, <64 x i32> %in.vec5, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
12466 %4 = shufflevector <128 x i32> %1, <128 x i32> %2, <256 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255>
12467 %5 = shufflevector <64 x i32> %in.vec6, <64 x i32> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
12468 %6 = shufflevector <128 x i32> %3, <128 x i32> %5, <192 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191>
12469 %7 = shufflevector <192 x i32> %6, <192 x i32> poison, <256 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
12470 %8 = shufflevector <256 x i32> %4, <256 x i32> %7, <448 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255, i32 256, i32 257, i32 258, i32 259, i32 260, i32 261, i32 262, i32 263, i32 264, i32 265, i32 266, i32 267, i32 268, i32 269, i32 270, i32 271, i32 272, i32 273, i32 274, i32 275, i32 276, i32 277, i32 278, i32 279, i32 280, i32 281, i32 282, i32 283, i32 284, i32 285, i32 286, i32 287, i32 288, i32 289, i32 290, i32 291, i32 292, i32 293, i32 294, i32 295, i32 296, i32 297, i32 298, i32 299, i32 300, i32 301, i32 302, i32 303, i32 304, i32 305, i32 306, i32 307, i32 308, i32 309, i32 310, i32 311, i32 312, i32 313, i32 314, i32 315, i32 316, i32 317, i32 318, i32 319, i32 320, i32 321, i32 322, i32 323, i32 324, i32 325, i32 326, i32 327, i32 328, i32 329, i32 330, i32 331, i32 332, i32 333, i32 334, i32 335, i32 336, i32 337, i32 338, i32 339, i32 340, i32 341, i32 342, i32 343, i32 344, i32 345, i32 346, i32 347, i32 348, i32 349, i32 350, i32 351, i32 352, i32 353, i32 354, i32 355, i32 356, i32 357, i32 358, i32 359, i32 360, i32 361, i32 362, i32 363, i32 364, i32 365, i32 366, i32 367, i32 368, i32 369, i32 370, i32 371, i32 372, i32 373, i32 374, i32 375, i32 376, i32 377, i32 378, i32 379, i32 380, i32 381, i32 382, i32 383, i32 384, i32 385, i32 386, i32 387, i32 388, i32 389, i32 390, i32 391, i32 392, i32 393, i32 394, i32 395, i32 396, i32 397, i32 398, i32 399, i32 400, i32 401, i32 402, i32 403, i32 404, i32 405, i32 406, i32 407, i32 408, i32 409, i32 410, i32 411, i32 412, i32 413, i32 414, i32 415, i32 416, i32 417, i32 418, i32 419, i32 420, i32 421, i32 422, i32 423, i32 424, i32 425, i32 426, i32 427, i32 428, i32 429, i32 430, i32 431, i32 432, i32 433, i32 434, i32 435, i32 436, i32 437, i32 438, i32 439, i32 440, i32 441, i32 442, i32 443, i32 444, i32 445, i32 446, i32 447>
12471 %interleaved.vec = shufflevector <448 x i32> %8, <448 x i32> poison, <448 x i32> <i32 0, i32 64, i32 128, i32 192, i32 256, i32 320, i32 384, i32 1, i32 65, i32 129, i32 193, i32 257, i32 321, i32 385, i32 2, i32 66, i32 130, i32 194, i32 258, i32 322, i32 386, i32 3, i32 67, i32 131, i32 195, i32 259, i32 323, i32 387, i32 4, i32 68, i32 132, i32 196, i32 260, i32 324, i32 388, i32 5, i32 69, i32 133, i32 197, i32 261, i32 325, i32 389, i32 6, i32 70, i32 134, i32 198, i32 262, i32 326, i32 390, i32 7, i32 71, i32 135, i32 199, i32 263, i32 327, i32 391, i32 8, i32 72, i32 136, i32 200, i32 264, i32 328, i32 392, i32 9, i32 73, i32 137, i32 201, i32 265, i32 329, i32 393, i32 10, i32 74, i32 138, i32 202, i32 266, i32 330, i32 394, i32 11, i32 75, i32 139, i32 203, i32 267, i32 331, i32 395, i32 12, i32 76, i32 140, i32 204, i32 268, i32 332, i32 396, i32 13, i32 77, i32 141, i32 205, i32 269, i32 333, i32 397, i32 14, i32 78, i32 142, i32 206, i32 270, i32 334, i32 398, i32 15, i32 79, i32 143, i32 207, i32 271, i32 335, i32 399, i32 16, i32 80, i32 144, i32 208, i32 272, i32 336, i32 400, i32 17, i32 81, i32 145, i32 209, i32 273, i32 337, i32 401, i32 18, i32 82, i32 146, i32 210, i32 274, i32 338, i32 402, i32 19, i32 83, i32 147, i32 211, i32 275, i32 339, i32 403, i32 20, i32 84, i32 148, i32 212, i32 276, i32 340, i32 404, i32 21, i32 85, i32 149, i32 213, i32 277, i32 341, i32 405, i32 22, i32 86, i32 150, i32 214, i32 278, i32 342, i32 406, i32 23, i32 87, i32 151, i32 215, i32 279, i32 343, i32 407, i32 24, i32 88, i32 152, i32 216, i32 280, i32 344, i32 408, i32 25, i32 89, i32 153, i32 217, i32 281, i32 345, i32 409, i32 26, i32 90, i32 154, i32 218, i32 282, i32 346, i32 410, i32 27, i32 91, i32 155, i32 219, i32 283, i32 347, i32 411, i32 28, i32 92, i32 156, i32 220, i32 284, i32 348, i32 412, i32 29, i32 93, i32 157, i32 221, i32 285, i32 349, i32 413, i32 30, i32 94, i32 158, i32 222, i32 286, i32 350, i32 414, i32 31, i32 95, i32 159, i32 223, i32 287, i32 351, i32 415, i32 32, i32 96, i32 160, i32 224, i32 288, i32 352, i32 416, i32 33, i32 97, i32 161, i32 225, i32 289, i32 353, i32 417, i32 34, i32 98, i32 162, i32 226, i32 290, i32 354, i32 418, i32 35, i32 99, i32 163, i32 227, i32 291, i32 355, i32 419, i32 36, i32 100, i32 164, i32 228, i32 292, i32 356, i32 420, i32 37, i32 101, i32 165, i32 229, i32 293, i32 357, i32 421, i32 38, i32 102, i32 166, i32 230, i32 294, i32 358, i32 422, i32 39, i32 103, i32 167, i32 231, i32 295, i32 359, i32 423, i32 40, i32 104, i32 168, i32 232, i32 296, i32 360, i32 424, i32 41, i32 105, i32 169, i32 233, i32 297, i32 361, i32 425, i32 42, i32 106, i32 170, i32 234, i32 298, i32 362, i32 426, i32 43, i32 107, i32 171, i32 235, i32 299, i32 363, i32 427, i32 44, i32 108, i32 172, i32 236, i32 300, i32 364, i32 428, i32 45, i32 109, i32 173, i32 237, i32 301, i32 365, i32 429, i32 46, i32 110, i32 174, i32 238, i32 302, i32 366, i32 430, i32 47, i32 111, i32 175, i32 239, i32 303, i32 367, i32 431, i32 48, i32 112, i32 176, i32 240, i32 304, i32 368, i32 432, i32 49, i32 113, i32 177, i32 241, i32 305, i32 369, i32 433, i32 50, i32 114, i32 178, i32 242, i32 306, i32 370, i32 434, i32 51, i32 115, i32 179, i32 243, i32 307, i32 371, i32 435, i32 52, i32 116, i32 180, i32 244, i32 308, i32 372, i32 436, i32 53, i32 117, i32 181, i32 245, i32 309, i32 373, i32 437, i32 54, i32 118, i32 182, i32 246, i32 310, i32 374, i32 438, i32 55, i32 119, i32 183, i32 247, i32 311, i32 375, i32 439, i32 56, i32 120, i32 184, i32 248, i32 312, i32 376, i32 440, i32 57, i32 121, i32 185, i32 249, i32 313, i32 377, i32 441, i32 58, i32 122, i32 186, i32 250, i32 314, i32 378, i32 442, i32 59, i32 123, i32 187, i32 251, i32 315, i32 379, i32 443, i32 60, i32 124, i32 188, i32 252, i32 316, i32 380, i32 444, i32 61, i32 125, i32 189, i32 253, i32 317, i32 381, i32 445, i32 62, i32 126, i32 190, i32 254, i32 318, i32 382, i32 446, i32 63, i32 127, i32 191, i32 255, i32 319, i32 383, i32 447>
12472 store <448 x i32> %interleaved.vec, ptr %out.vec, align 64
12473 ret void
12474 }
12475 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
12479 ; AVX2-ONLY: {{.*}}
12480 ; AVX512BW-ONLY-FAST: {{.*}}
12481 ; AVX512BW-ONLY-SLOW: {{.*}}
12482 ; AVX512DQ-FAST: {{.*}}
12483 ; AVX512DQ-SLOW: {{.*}}
12484 ; AVX512DQBW-FAST: {{.*}}
12485 ; AVX512DQBW-SLOW: {{.*}}
12486 ; AVX512F-ONLY-FAST: {{.*}}
12487 ; AVX512F-ONLY-SLOW: {{.*}}
12488 ; FALLBACK0: {{.*}}
12489 ; FALLBACK1: {{.*}}
12490 ; FALLBACK10: {{.*}}
12491 ; FALLBACK11: {{.*}}
12492 ; FALLBACK12: {{.*}}
12493 ; FALLBACK2: {{.*}}
12494 ; FALLBACK3: {{.*}}
12495 ; FALLBACK4: {{.*}}
12496 ; FALLBACK5: {{.*}}
12497 ; FALLBACK6: {{.*}}
12498 ; FALLBACK7: {{.*}}
12499 ; FALLBACK8: {{.*}}
12500 ; FALLBACK9: {{.*}}