1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by LoopVectorizer for interleaved stores.
18 define void @store_i32_stride6_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %out.vec) nounwind {
19 ; SSE-LABEL: store_i32_stride6_vf2:
21 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
22 ; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
23 ; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
24 ; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
25 ; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
26 ; SSE-NEXT: movaps %xmm0, %xmm4
27 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm1[0]
28 ; SSE-NEXT: movaps %xmm2, %xmm5
29 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm3[0]
30 ; SSE-NEXT: movsd {{.*#+}} xmm6 = mem[0],zero
31 ; SSE-NEXT: movsd {{.*#+}} xmm7 = mem[0],zero
32 ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm6[0]
33 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
34 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
35 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
36 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm7[1,3]
37 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm4[1,3]
38 ; SSE-NEXT: movaps %xmm5, 32(%rax)
39 ; SSE-NEXT: movaps %xmm7, 16(%rax)
40 ; SSE-NEXT: movaps %xmm0, (%rax)
43 ; AVX1-ONLY-LABEL: store_i32_stride6_vf2:
45 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
46 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
47 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
48 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
49 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
50 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
51 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
52 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
53 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
54 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm5[0],xmm4[0]
55 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
56 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm2 = ymm2[u,u,0,2,u,u,5,7]
57 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
58 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
59 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm4[0,2,2,3]
60 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
61 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
62 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm3[1,3],xmm4[1,3]
63 ; AVX1-ONLY-NEXT: vmovaps %xmm1, 32(%rax)
64 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
65 ; AVX1-ONLY-NEXT: vzeroupper
66 ; AVX1-ONLY-NEXT: retq
68 ; AVX2-SLOW-LABEL: store_i32_stride6_vf2:
70 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
71 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
72 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
73 ; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
74 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
75 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
76 ; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
77 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
78 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
79 ; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
80 ; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
81 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
82 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
83 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2,2,3]
84 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
85 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
86 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
87 ; AVX2-SLOW-NEXT: vmovaps %xmm1, 32(%rax)
88 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
89 ; AVX2-SLOW-NEXT: vzeroupper
90 ; AVX2-SLOW-NEXT: retq
92 ; AVX2-FAST-LABEL: store_i32_stride6_vf2:
94 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
95 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
96 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
97 ; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
98 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
99 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
100 ; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
101 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
102 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
103 ; AVX2-FAST-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
104 ; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm3 = xmm2[0],xmm1[0]
105 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm0[1,3],ymm3[1,3],ymm0[5,7],ymm3[5,7]
106 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3]
107 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm4 = <0,2,4,6,u,u,1,3>
108 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm4, %ymm0
109 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
110 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
111 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
112 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
113 ; AVX2-FAST-NEXT: vmovaps %xmm3, 32(%rax)
114 ; AVX2-FAST-NEXT: vzeroupper
115 ; AVX2-FAST-NEXT: retq
117 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride6_vf2:
118 ; AVX2-FAST-PERLANE: # %bb.0:
119 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
120 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
121 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
122 ; AVX2-FAST-PERLANE-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
123 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
124 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
125 ; AVX2-FAST-PERLANE-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
126 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
127 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
128 ; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
129 ; AVX2-FAST-PERLANE-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
130 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
131 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
132 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2,2,3]
133 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
134 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
135 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
136 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm1, 32(%rax)
137 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
138 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
139 ; AVX2-FAST-PERLANE-NEXT: retq
141 ; AVX512-LABEL: store_i32_stride6_vf2:
143 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
144 ; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
145 ; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
146 ; AVX512-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
147 ; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
148 ; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
149 ; AVX512-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
150 ; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
151 ; AVX512-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
152 ; AVX512-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
153 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
154 ; AVX512-NEXT: vinsertf32x4 $2, %xmm2, %zmm0, %zmm0
155 ; AVX512-NEXT: vmovaps {{.*#+}} zmm1 = <0,2,4,6,8,10,1,3,5,7,9,11,u,u,u,u>
156 ; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
157 ; AVX512-NEXT: vextractf32x4 $2, %zmm0, 32(%rax)
158 ; AVX512-NEXT: vmovaps %ymm0, (%rax)
159 ; AVX512-NEXT: vzeroupper
161 %in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64
162 %in.vec1 = load <2 x i32>, ptr %in.vecptr1, align 64
163 %in.vec2 = load <2 x i32>, ptr %in.vecptr2, align 64
164 %in.vec3 = load <2 x i32>, ptr %in.vecptr3, align 64
165 %in.vec4 = load <2 x i32>, ptr %in.vecptr4, align 64
166 %in.vec5 = load <2 x i32>, ptr %in.vecptr5, align 64
167 %1 = shufflevector <2 x i32> %in.vec0, <2 x i32> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
168 %2 = shufflevector <2 x i32> %in.vec2, <2 x i32> %in.vec3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
169 %3 = shufflevector <2 x i32> %in.vec4, <2 x i32> %in.vec5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
170 %4 = shufflevector <4 x i32> %1, <4 x i32> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
171 %5 = shufflevector <4 x i32> %3, <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
172 %6 = shufflevector <8 x i32> %4, <8 x i32> %5, <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
173 %interleaved.vec = shufflevector <12 x i32> %6, <12 x i32> poison, <12 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11>
174 store <12 x i32> %interleaved.vec, ptr %out.vec, align 64
178 define void @store_i32_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %out.vec) nounwind {
179 ; SSE-LABEL: store_i32_stride6_vf4:
181 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
182 ; SSE-NEXT: movaps (%rdi), %xmm0
183 ; SSE-NEXT: movaps (%rsi), %xmm4
184 ; SSE-NEXT: movaps (%rdx), %xmm1
185 ; SSE-NEXT: movaps (%rcx), %xmm5
186 ; SSE-NEXT: movaps (%r8), %xmm7
187 ; SSE-NEXT: movaps (%r9), %xmm3
188 ; SSE-NEXT: movaps %xmm1, %xmm2
189 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
190 ; SSE-NEXT: movaps %xmm7, %xmm6
191 ; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm3[1]
192 ; SSE-NEXT: movaps %xmm7, %xmm8
193 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,3],xmm3[3,3]
194 ; SSE-NEXT: movaps %xmm7, %xmm9
195 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,1],xmm3[1,1]
196 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm7[0]
197 ; SSE-NEXT: movaps %xmm0, %xmm7
198 ; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
199 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm7[2,3]
200 ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm2[0]
201 ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
202 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[2,3]
203 ; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
204 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
205 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm8[0,2]
206 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,3],xmm9[0,2]
207 ; SSE-NEXT: movaps %xmm3, 16(%rax)
208 ; SSE-NEXT: movaps %xmm2, 32(%rax)
209 ; SSE-NEXT: movaps %xmm0, 48(%rax)
210 ; SSE-NEXT: movaps %xmm1, 80(%rax)
211 ; SSE-NEXT: movaps %xmm6, 64(%rax)
212 ; SSE-NEXT: movaps %xmm7, (%rax)
215 ; AVX1-ONLY-LABEL: store_i32_stride6_vf4:
216 ; AVX1-ONLY: # %bb.0:
217 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
218 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm0
219 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm2
220 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm1
221 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm3
222 ; AVX1-ONLY-NEXT: vmovaps (%r8), %xmm4
223 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm5
224 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm6
225 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm7
226 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm8
227 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm9
228 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm10 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[4],ymm8[4],ymm9[5],ymm8[5]
229 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm11
230 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm6[0],ymm11[0],ymm6[2],ymm11[2]
231 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,2,3,1,4,6,7,5]
232 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm3[0,0],xmm1[0,0]
233 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm13[0,1,2,0]
234 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5,6,7]
235 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5],ymm12[6,7]
236 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm6 = ymm11[2],ymm6[2],ymm11[3],ymm6[3],ymm11[6],ymm6[6],ymm11[7],ymm6[7]
237 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm11
238 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,2],ymm11[1,2],ymm7[5,6],ymm11[5,6]
239 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,2,3,1,4,6,7,5]
240 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
241 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
242 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5,6,7]
243 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm5 = ymm8[1],ymm9[1],ymm8[3],ymm9[3]
244 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,2,3,1,4,6,7,5]
245 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
246 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
247 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,3],ymm2[3,3],ymm0[7,7],ymm2[7,7]
248 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
249 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3,4,5],ymm5[6,7]
250 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rax)
251 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rax)
252 ; AVX1-ONLY-NEXT: vmovaps %ymm10, (%rax)
253 ; AVX1-ONLY-NEXT: vzeroupper
254 ; AVX1-ONLY-NEXT: retq
256 ; AVX2-ONLY-LABEL: store_i32_stride6_vf4:
257 ; AVX2-ONLY: # %bb.0:
258 ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
259 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm0
260 ; AVX2-ONLY-NEXT: vmovaps (%rsi), %xmm1
261 ; AVX2-ONLY-NEXT: vmovaps (%rdx), %xmm2
262 ; AVX2-ONLY-NEXT: vmovaps (%rcx), %xmm3
263 ; AVX2-ONLY-NEXT: vmovaps (%r8), %xmm4
264 ; AVX2-ONLY-NEXT: vmovaps (%r9), %xmm5
265 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm6
266 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm7
267 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm8
268 ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm9 = [0,4,0,4]
269 ; AVX2-ONLY-NEXT: # xmm9 = mem[0,0]
270 ; AVX2-ONLY-NEXT: vpermps %ymm7, %ymm9, %ymm9
271 ; AVX2-ONLY-NEXT: vbroadcastf128 {{.*#+}} ymm10 = [0,4,1,5,0,4,1,5]
272 ; AVX2-ONLY-NEXT: # ymm10 = mem[0,1,0,1]
273 ; AVX2-ONLY-NEXT: vpermps %ymm6, %ymm10, %ymm10
274 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5,6,7]
275 ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm10 = [0,4,0,4,0,4,0,4]
276 ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm10, %ymm10
277 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5],ymm9[6,7]
278 ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm10 = [2,6,2,6,2,6,2,6]
279 ; AVX2-ONLY-NEXT: vpermps %ymm6, %ymm10, %ymm6
280 ; AVX2-ONLY-NEXT: vbroadcastf128 {{.*#+}} ymm10 = [1,5,2,6,1,5,2,6]
281 ; AVX2-ONLY-NEXT: # ymm10 = mem[0,1,0,1]
282 ; AVX2-ONLY-NEXT: vpermps %ymm7, %ymm10, %ymm7
283 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
284 ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
285 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5,6,7]
286 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
287 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
288 ; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
289 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
290 ; AVX2-ONLY-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2,6,3,7,2,6,3,7]
291 ; AVX2-ONLY-NEXT: # ymm1 = mem[0,1,0,1]
292 ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm1, %ymm1
293 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
294 ; AVX2-ONLY-NEXT: vmovaps %ymm0, 64(%rax)
295 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 32(%rax)
296 ; AVX2-ONLY-NEXT: vmovaps %ymm9, (%rax)
297 ; AVX2-ONLY-NEXT: vzeroupper
298 ; AVX2-ONLY-NEXT: retq
300 ; AVX512-LABEL: store_i32_stride6_vf4:
302 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
303 ; AVX512-NEXT: vmovdqa (%rdi), %xmm0
304 ; AVX512-NEXT: vmovdqa (%rdx), %xmm1
305 ; AVX512-NEXT: vmovdqa (%r8), %xmm2
306 ; AVX512-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1
307 ; AVX512-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0
308 ; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
309 ; AVX512-NEXT: vinserti32x4 $1, (%r9), %zmm2, %zmm1
310 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [18,22,3,7,11,15,19,23]
311 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
312 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,4,8,12,16,20,1,5,9,13,17,21,2,6,10,14]
313 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
314 ; AVX512-NEXT: vmovdqa64 %zmm3, (%rax)
315 ; AVX512-NEXT: vmovdqa %ymm2, 64(%rax)
316 ; AVX512-NEXT: vzeroupper
318 %in.vec0 = load <4 x i32>, ptr %in.vecptr0, align 64
319 %in.vec1 = load <4 x i32>, ptr %in.vecptr1, align 64
320 %in.vec2 = load <4 x i32>, ptr %in.vecptr2, align 64
321 %in.vec3 = load <4 x i32>, ptr %in.vecptr3, align 64
322 %in.vec4 = load <4 x i32>, ptr %in.vecptr4, align 64
323 %in.vec5 = load <4 x i32>, ptr %in.vecptr5, align 64
324 %1 = shufflevector <4 x i32> %in.vec0, <4 x i32> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
325 %2 = shufflevector <4 x i32> %in.vec2, <4 x i32> %in.vec3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
326 %3 = shufflevector <4 x i32> %in.vec4, <4 x i32> %in.vec5, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
327 %4 = shufflevector <8 x i32> %1, <8 x i32> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
328 %5 = shufflevector <8 x i32> %3, <8 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
329 %6 = shufflevector <16 x i32> %4, <16 x i32> %5, <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
330 %interleaved.vec = shufflevector <24 x i32> %6, <24 x i32> poison, <24 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23>
331 store <24 x i32> %interleaved.vec, ptr %out.vec, align 64
335 define void @store_i32_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %out.vec) nounwind {
336 ; SSE-LABEL: store_i32_stride6_vf8:
338 ; SSE-NEXT: movaps (%rdi), %xmm4
339 ; SSE-NEXT: movaps 16(%rdi), %xmm1
340 ; SSE-NEXT: movaps (%rsi), %xmm0
341 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
342 ; SSE-NEXT: movaps 16(%rsi), %xmm10
343 ; SSE-NEXT: movaps (%rdx), %xmm8
344 ; SSE-NEXT: movaps 16(%rdx), %xmm2
345 ; SSE-NEXT: movaps (%rcx), %xmm6
346 ; SSE-NEXT: movaps 16(%rcx), %xmm9
347 ; SSE-NEXT: movaps (%r8), %xmm5
348 ; SSE-NEXT: movaps 16(%r8), %xmm11
349 ; SSE-NEXT: movaps (%r9), %xmm7
350 ; SSE-NEXT: movaps 16(%r9), %xmm3
351 ; SSE-NEXT: movaps %xmm9, %xmm14
352 ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm2[1]
353 ; SSE-NEXT: movaps %xmm1, %xmm12
354 ; SSE-NEXT: unpckhps {{.*#+}} xmm12 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
355 ; SSE-NEXT: movaps %xmm11, %xmm13
356 ; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm3[1]
357 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,2],xmm12[2,3]
358 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,1],xmm14[2,0]
359 ; SSE-NEXT: movaps %xmm11, %xmm14
360 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,1],xmm3[1,1]
361 ; SSE-NEXT: movaps %xmm2, %xmm15
362 ; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm9[0],xmm15[1],xmm9[1]
363 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
364 ; SSE-NEXT: movaps %xmm11, %xmm0
365 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm3[3,3]
366 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm11[0]
367 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,3]
368 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm15[0]
369 ; SSE-NEXT: movaps %xmm15, %xmm10
370 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[2,3],xmm14[0,2]
371 ; SSE-NEXT: movaps %xmm5, %xmm14
372 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[3,3],xmm7[3,3]
373 ; SSE-NEXT: movaps %xmm8, %xmm11
374 ; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm6[2],xmm11[3],xmm6[3]
375 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,3],xmm14[0,2]
376 ; SSE-NEXT: movaps %xmm4, %xmm14
377 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
378 ; SSE-NEXT: unpckhps {{.*#+}} xmm14 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
379 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1]
380 ; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm9[2],xmm2[3],xmm9[3]
381 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,3],xmm0[0,2]
382 ; SSE-NEXT: movaps %xmm8, %xmm0
383 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
384 ; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm8[1]
385 ; SSE-NEXT: movaps %xmm5, %xmm8
386 ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm7[1]
387 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm14[2,3]
388 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,1],xmm6[2,0]
389 ; SSE-NEXT: movaps %xmm7, %xmm6
390 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm5[0]
391 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm7[1,1]
392 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm4[2,3]
393 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
394 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm5[0,2]
395 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
396 ; SSE-NEXT: movaps %xmm0, 32(%rax)
397 ; SSE-NEXT: movaps %xmm14, 48(%rax)
398 ; SSE-NEXT: movaps %xmm1, 96(%rax)
399 ; SSE-NEXT: movaps %xmm3, 112(%rax)
400 ; SSE-NEXT: movaps %xmm13, 160(%rax)
401 ; SSE-NEXT: movaps %xmm2, 176(%rax)
402 ; SSE-NEXT: movaps %xmm4, (%rax)
403 ; SSE-NEXT: movaps %xmm6, 16(%rax)
404 ; SSE-NEXT: movaps %xmm8, 64(%rax)
405 ; SSE-NEXT: movaps %xmm11, 80(%rax)
406 ; SSE-NEXT: movaps %xmm10, 128(%rax)
407 ; SSE-NEXT: movaps %xmm12, 144(%rax)
410 ; AVX1-ONLY-LABEL: store_i32_stride6_vf8:
411 ; AVX1-ONLY: # %bb.0:
412 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
413 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm7
414 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm8
415 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm2
416 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm3
417 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm1
418 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[4],ymm8[4],ymm7[5],ymm8[5]
419 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
420 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
421 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,2,0,4,5,6,4]
422 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm4, %xmm4
423 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
424 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
425 ; AVX1-ONLY-NEXT: vbroadcastss 16(%r9), %ymm4
426 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7]
427 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm9
428 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm10
429 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm10[1,2],xmm9[1,2]
430 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,2,1,3]
431 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
432 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm5
433 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm6
434 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm11 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
435 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm12
436 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm12[4,5],ymm4[6,7]
437 ; AVX1-ONLY-NEXT: vbroadcastss 4(%r8), %xmm12
438 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm12[2,3],ymm4[4,5,6,7]
439 ; AVX1-ONLY-NEXT: vbroadcastss 4(%r9), %ymm12
440 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm12[3],ymm4[4,5,6,7]
441 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm8 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7]
442 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm2[1,2],ymm3[1,2],ymm2[5,6],ymm3[5,6]
443 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3]
444 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,2,1,3,4,6,5,7]
445 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5],ymm7[6,7]
446 ; AVX1-ONLY-NEXT: vbroadcastss 20(%r8), %xmm12
447 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm12[2,3],ymm7[4,5,6,7]
448 ; AVX1-ONLY-NEXT: vbroadcastss 20(%r9), %ymm12
449 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm12[3],ymm7[4,5,6,7]
450 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm9 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
451 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
452 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm11, %ymm9
453 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm10 = mem[2,1,3,3]
454 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm10, %ymm10
455 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3,4,5],ymm10[6,7]
456 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm10
457 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm10[0,2,2,3]
458 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm11, %ymm10
459 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3,4,5,6],ymm10[7]
460 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm3[3,0],ymm2[3,0],ymm3[7,4],ymm2[7,4]
461 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
462 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
463 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
464 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
465 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5],ymm1[6,7]
466 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = mem[2,3,2,3]
467 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
468 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6],ymm2[7]
469 ; AVX1-ONLY-NEXT: vbroadcastss (%rcx), %xmm2
470 ; AVX1-ONLY-NEXT: vbroadcastss (%rdx), %xmm3
471 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
472 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm3 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
473 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm5
474 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
475 ; AVX1-ONLY-NEXT: vinsertf128 $1, (%r8), %ymm3, %ymm3
476 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
477 ; AVX1-ONLY-NEXT: vbroadcastss (%r9), %ymm3
478 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
479 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rax)
480 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%rax)
481 ; AVX1-ONLY-NEXT: vmovaps %ymm9, 64(%rax)
482 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 128(%rax)
483 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rax)
484 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax)
485 ; AVX1-ONLY-NEXT: vzeroupper
486 ; AVX1-ONLY-NEXT: retq
488 ; AVX2-SLOW-LABEL: store_i32_stride6_vf8:
489 ; AVX2-SLOW: # %bb.0:
490 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
491 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
492 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm1
493 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm3
494 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm4
495 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm2
496 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm6
497 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm11
498 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm11[2],xmm6[2],xmm11[3],xmm6[3]
499 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm5
500 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm8
501 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm8[1,2,2,3]
502 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm9
503 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm9[1,2,2,3]
504 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
505 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,1,2,1]
506 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5],ymm10[6,7]
507 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm10
508 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm12 = xmm10[0],zero,xmm10[1],zero
509 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm12[2,3],ymm5[4,5,6,7]
510 ; AVX2-SLOW-NEXT: vpbroadcastd 4(%r9), %ymm12
511 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm12[3],ymm5[4,5,6,7]
512 ; AVX2-SLOW-NEXT: vpbroadcastd (%rcx), %xmm12
513 ; AVX2-SLOW-NEXT: vpbroadcastd (%rdx), %xmm13
514 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
515 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm11[0],xmm6[0],xmm11[1],xmm6[1]
516 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
517 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm12[2,3],ymm6[4,5,6,7]
518 ; AVX2-SLOW-NEXT: vpbroadcastq %xmm10, %ymm11
519 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm11[4,5],ymm6[6,7]
520 ; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm12
521 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm12, %ymm11
522 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm11[5],ymm6[6,7]
523 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm13 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
524 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm11 = ymm4[0,1,2,2,4,5,6,6]
525 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm14 = ymm3[1,1,2,3,5,5,6,7]
526 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0],ymm11[1],ymm14[2],ymm11[3],ymm14[4],ymm11[5],ymm14[6],ymm11[7]
527 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,2,3]
528 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5],ymm11[6,7]
529 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm14 = mem[0],zero,mem[1],zero
530 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1],ymm14[2,3],ymm11[4,5,6,7]
531 ; AVX2-SLOW-NEXT: vpbroadcastd 20(%r9), %ymm14
532 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2],ymm14[3],ymm11[4,5,6,7]
533 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
534 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
535 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm7, %ymm7
536 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm10[2,2,3,3]
537 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1]
538 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5],ymm8[6,7]
539 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm12[2,2,3,3]
540 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1]
541 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3,4,5,6],ymm8[7]
542 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm8 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
543 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[2,3,2,3,6,7,6,7]
544 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm8 = ymm13[2,3],ymm8[2,3]
545 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm9 = ymm2[2,1,3,3,6,5,7,7]
546 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
547 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3,4,5],ymm9[6,7]
548 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm9 = mem[0,2,2,3,4,6,6,7]
549 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
550 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3,4,5,6],ymm9[7]
551 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
552 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
553 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
554 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
555 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
556 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
557 ; AVX2-SLOW-NEXT: vpbroadcastd 16(%r9), %ymm1
558 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
559 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 96(%rax)
560 ; AVX2-SLOW-NEXT: vmovdqa %ymm8, 160(%rax)
561 ; AVX2-SLOW-NEXT: vmovdqa %ymm7, 64(%rax)
562 ; AVX2-SLOW-NEXT: vmovdqa %ymm11, 128(%rax)
563 ; AVX2-SLOW-NEXT: vmovdqa %ymm6, (%rax)
564 ; AVX2-SLOW-NEXT: vmovdqa %ymm5, 32(%rax)
565 ; AVX2-SLOW-NEXT: vzeroupper
566 ; AVX2-SLOW-NEXT: retq
568 ; AVX2-FAST-LABEL: store_i32_stride6_vf8:
569 ; AVX2-FAST: # %bb.0:
570 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
571 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
572 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %ymm1
573 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm3
574 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %ymm4
575 ; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm2
576 ; AVX2-FAST-NEXT: vmovdqa (%r9), %ymm5
577 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm7
578 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm11
579 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm11[2],xmm7[2],xmm11[3],xmm7[3]
580 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm6
581 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %xmm9
582 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm9[1,2,2,3]
583 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm10
584 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm13 = xmm10[1,2,2,3]
585 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
586 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,2,1]
587 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0,1,2,3],ymm6[4,5],ymm12[6,7]
588 ; AVX2-FAST-NEXT: vmovdqa (%r8), %xmm12
589 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm13 = xmm12[0],zero,xmm12[1],zero
590 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm13[2,3],ymm6[4,5,6,7]
591 ; AVX2-FAST-NEXT: vpbroadcastd 4(%r9), %ymm13
592 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm13[3],ymm6[4,5,6,7]
593 ; AVX2-FAST-NEXT: vpbroadcastd (%rcx), %xmm13
594 ; AVX2-FAST-NEXT: vpbroadcastd (%rdx), %xmm14
595 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
596 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm11[0],xmm7[0],xmm11[1],xmm7[1]
597 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,2,1]
598 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm13[2,3],ymm7[4,5,6,7]
599 ; AVX2-FAST-NEXT: vpbroadcastq %xmm12, %ymm11
600 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm11[4,5],ymm7[6,7]
601 ; AVX2-FAST-NEXT: vpbroadcastd (%r9), %ymm11
602 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm11[5],ymm7[6,7]
603 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm11 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
604 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm12 = ymm4[0,1,2,2,4,5,6,6]
605 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm13 = ymm3[1,1,2,3,5,5,6,7]
606 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0],ymm12[1],ymm13[2],ymm12[3],ymm13[4],ymm12[5],ymm13[6],ymm12[7]
607 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,1,2,3]
608 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm11[4,5],ymm12[6,7]
609 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm13 = mem[0],zero,mem[1],zero
610 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5,6,7]
611 ; AVX2-FAST-NEXT: vpbroadcastd 20(%r9), %ymm13
612 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm13[3],ymm12[4,5,6,7]
613 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm9 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
614 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,3,2,3]
615 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm9, %ymm8, %ymm8
616 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [2,2,3,3,2,2,3,3]
617 ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,0,1]
618 ; AVX2-FAST-NEXT: vpermd %ymm2, %ymm9, %ymm10
619 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3,4,5],ymm10[6,7]
620 ; AVX2-FAST-NEXT: vpermd %ymm5, %ymm9, %ymm9
621 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3,4,5,6],ymm9[7]
622 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm9 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
623 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,3,2,3,6,7,6,7]
624 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm11[2,3],ymm9[2,3]
625 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [6,0,7,0,6,0,7,0]
626 ; AVX2-FAST-NEXT: # ymm10 = mem[0,1,0,1]
627 ; AVX2-FAST-NEXT: vpermd %ymm2, %ymm10, %ymm10
628 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3,4,5],ymm10[6,7]
629 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [0,6,0,7,0,6,0,7]
630 ; AVX2-FAST-NEXT: # ymm10 = mem[0,1,0,1]
631 ; AVX2-FAST-NEXT: vpermd %ymm5, %ymm10, %ymm5
632 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm9[0],ymm5[1],ymm9[2,3,4,5,6],ymm5[7]
633 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
634 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
635 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
636 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
637 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
638 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
639 ; AVX2-FAST-NEXT: vpbroadcastd 16(%r9), %ymm1
640 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
641 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 96(%rax)
642 ; AVX2-FAST-NEXT: vmovdqa %ymm5, 160(%rax)
643 ; AVX2-FAST-NEXT: vmovdqa %ymm8, 64(%rax)
644 ; AVX2-FAST-NEXT: vmovdqa %ymm12, 128(%rax)
645 ; AVX2-FAST-NEXT: vmovdqa %ymm7, (%rax)
646 ; AVX2-FAST-NEXT: vmovdqa %ymm6, 32(%rax)
647 ; AVX2-FAST-NEXT: vzeroupper
648 ; AVX2-FAST-NEXT: retq
650 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride6_vf8:
651 ; AVX2-FAST-PERLANE: # %bb.0:
652 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
653 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0
654 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm1
655 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm3
656 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %ymm4
657 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %ymm2
658 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm6
659 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm11
660 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm11[2],xmm6[2],xmm11[3],xmm6[3]
661 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm5
662 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm8
663 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm10 = xmm8[1,2,2,3]
664 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm9
665 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm12 = xmm9[1,2,2,3]
666 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
667 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,1,2,1]
668 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5],ymm10[6,7]
669 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm10
670 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm12 = xmm10[0],zero,xmm10[1],zero
671 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm12[2,3],ymm5[4,5,6,7]
672 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 4(%r9), %ymm12
673 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm12[3],ymm5[4,5,6,7]
674 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd (%rcx), %xmm12
675 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd (%rdx), %xmm13
676 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
677 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm11[0],xmm6[0],xmm11[1],xmm6[1]
678 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
679 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm12[2,3],ymm6[4,5,6,7]
680 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm10, %ymm11
681 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm11[4,5],ymm6[6,7]
682 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r9), %xmm12
683 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm12, %ymm11
684 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm11[5],ymm6[6,7]
685 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm13 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
686 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm11 = ymm4[0,1,2,2,4,5,6,6]
687 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm3[1,1,2,3,5,5,6,7]
688 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0],ymm11[1],ymm14[2],ymm11[3],ymm14[4],ymm11[5],ymm14[6],ymm11[7]
689 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,2,3]
690 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5],ymm11[6,7]
691 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm14 = mem[0],zero,mem[1],zero
692 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1],ymm14[2,3],ymm11[4,5,6,7]
693 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 20(%r9), %ymm14
694 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2],ymm14[3],ymm11[4,5,6,7]
695 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
696 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
697 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm8, %ymm7, %ymm7
698 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm8 = xmm10[2,2,3,3]
699 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1]
700 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5],ymm8[6,7]
701 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm8 = xmm12[2,2,3,3]
702 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1]
703 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3,4,5,6],ymm8[7]
704 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm8 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
705 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[2,3,2,3,6,7,6,7]
706 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm8 = ymm13[2,3],ymm8[2,3]
707 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm9 = ymm2[2,1,3,3,6,5,7,7]
708 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
709 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3,4,5],ymm9[6,7]
710 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm9 = mem[0,2,2,3,4,6,6,7]
711 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
712 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3,4,5,6],ymm9[7]
713 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
714 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
715 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
716 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
717 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
718 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
719 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 16(%r9), %ymm1
720 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
721 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 96(%rax)
722 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm8, 160(%rax)
723 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm7, 64(%rax)
724 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm11, 128(%rax)
725 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm6, (%rax)
726 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, 32(%rax)
727 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
728 ; AVX2-FAST-PERLANE-NEXT: retq
730 ; AVX512-LABEL: store_i32_stride6_vf8:
732 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
733 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
734 ; AVX512-NEXT: vmovdqa (%rdx), %ymm1
735 ; AVX512-NEXT: vmovdqa (%r8), %ymm2
736 ; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
737 ; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
738 ; AVX512-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2
739 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,8,16,24,u,u,1,9,17,25,u,u,2,10,18,26>
740 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
741 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,16,24,6,7,8,9,17,25,12,13,14,15]
742 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm4
743 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,u,3,11,19,27,u,u,4,12,20,28,u,u,5,13>
744 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
745 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [18,26,2,3,4,5,19,27,8,9,10,11,20,28,14,15]
746 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm5
747 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <5,13,u,u,22,30,6,14,u,u,23,31,7,15,u,u>
748 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
749 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,21,29,4,5,6,7,22,30,10,11,12,13,23,31]
750 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm0
751 ; AVX512-NEXT: vmovdqa64 %zmm0, 128(%rax)
752 ; AVX512-NEXT: vmovdqa64 %zmm5, 64(%rax)
753 ; AVX512-NEXT: vmovdqa64 %zmm4, (%rax)
754 ; AVX512-NEXT: vzeroupper
756 %in.vec0 = load <8 x i32>, ptr %in.vecptr0, align 64
757 %in.vec1 = load <8 x i32>, ptr %in.vecptr1, align 64
758 %in.vec2 = load <8 x i32>, ptr %in.vecptr2, align 64
759 %in.vec3 = load <8 x i32>, ptr %in.vecptr3, align 64
760 %in.vec4 = load <8 x i32>, ptr %in.vecptr4, align 64
761 %in.vec5 = load <8 x i32>, ptr %in.vecptr5, align 64
762 %1 = shufflevector <8 x i32> %in.vec0, <8 x i32> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
763 %2 = shufflevector <8 x i32> %in.vec2, <8 x i32> %in.vec3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
764 %3 = shufflevector <8 x i32> %in.vec4, <8 x i32> %in.vec5, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
765 %4 = shufflevector <16 x i32> %1, <16 x i32> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
766 %5 = shufflevector <16 x i32> %3, <16 x i32> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
767 %6 = shufflevector <32 x i32> %4, <32 x i32> %5, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
768 %interleaved.vec = shufflevector <48 x i32> %6, <48 x i32> poison, <48 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 7, i32 15, i32 23, i32 31, i32 39, i32 47>
769 store <48 x i32> %interleaved.vec, ptr %out.vec, align 64
773 define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %out.vec) nounwind {
774 ; SSE-LABEL: store_i32_stride6_vf16:
776 ; SSE-NEXT: subq $72, %rsp
777 ; SSE-NEXT: movaps (%rdi), %xmm7
778 ; SSE-NEXT: movaps 16(%rdi), %xmm8
779 ; SSE-NEXT: movaps (%rsi), %xmm2
780 ; SSE-NEXT: movaps 16(%rsi), %xmm6
781 ; SSE-NEXT: movaps (%rdx), %xmm9
782 ; SSE-NEXT: movaps 16(%rdx), %xmm10
783 ; SSE-NEXT: movaps (%rcx), %xmm1
784 ; SSE-NEXT: movaps 16(%rcx), %xmm0
785 ; SSE-NEXT: movaps (%r8), %xmm3
786 ; SSE-NEXT: movaps 16(%r8), %xmm14
787 ; SSE-NEXT: movaps (%r9), %xmm4
788 ; SSE-NEXT: movaps 16(%r9), %xmm13
789 ; SSE-NEXT: movaps %xmm9, %xmm11
790 ; SSE-NEXT: unpcklps {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1]
791 ; SSE-NEXT: movaps %xmm7, %xmm5
792 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
793 ; SSE-NEXT: movaps %xmm4, %xmm12
794 ; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm3[0]
795 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm5[2,3]
796 ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
797 ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm11[0]
798 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
799 ; SSE-NEXT: movaps %xmm3, %xmm5
800 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm4[1,1]
801 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,3],xmm5[0,2]
802 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
803 ; SSE-NEXT: movaps %xmm1, %xmm5
804 ; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm9[1]
805 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm2[2],xmm7[3],xmm2[3]
806 ; SSE-NEXT: movaps %xmm3, %xmm2
807 ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1]
808 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm7[2,3]
809 ; SSE-NEXT: movaps %xmm2, (%rsp) # 16-byte Spill
810 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm5[2,0]
811 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
812 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm4[3,3]
813 ; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm1[2],xmm9[3],xmm1[3]
814 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,3],xmm3[0,2]
815 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
816 ; SSE-NEXT: movaps %xmm10, %xmm2
817 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
818 ; SSE-NEXT: movaps %xmm8, %xmm1
819 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
820 ; SSE-NEXT: movaps %xmm13, %xmm3
821 ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm14[0]
822 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,3]
823 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
824 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
825 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
826 ; SSE-NEXT: movaps %xmm14, %xmm1
827 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm13[1,1]
828 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,3],xmm1[0,2]
829 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
830 ; SSE-NEXT: movaps %xmm0, %xmm1
831 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm10[1]
832 ; SSE-NEXT: unpckhps {{.*#+}} xmm8 = xmm8[2],xmm6[2],xmm8[3],xmm6[3]
833 ; SSE-NEXT: movaps %xmm14, %xmm2
834 ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm13[1]
835 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm8[2,3]
836 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
837 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm1[2,0]
838 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
839 ; SSE-NEXT: movaps 32(%rdi), %xmm12
840 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[3,3],xmm13[3,3]
841 ; SSE-NEXT: movaps 32(%rdx), %xmm13
842 ; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm0[2],xmm10[3],xmm0[3]
843 ; SSE-NEXT: movaps 32(%rcx), %xmm0
844 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[2,3],xmm14[0,2]
845 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
846 ; SSE-NEXT: movaps %xmm13, %xmm14
847 ; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
848 ; SSE-NEXT: movaps 32(%rsi), %xmm1
849 ; SSE-NEXT: movaps %xmm12, %xmm15
850 ; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
851 ; SSE-NEXT: movaps 32(%r8), %xmm2
852 ; SSE-NEXT: movaps 32(%r9), %xmm3
853 ; SSE-NEXT: movaps %xmm3, %xmm11
854 ; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm2[0]
855 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm15[2,3]
856 ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm14[0]
857 ; SSE-NEXT: movaps %xmm2, %xmm4
858 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
859 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,3],xmm4[0,2]
860 ; SSE-NEXT: unpckhps {{.*#+}} xmm12 = xmm12[2],xmm1[2],xmm12[3],xmm1[3]
861 ; SSE-NEXT: movaps %xmm0, %xmm1
862 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm13[1]
863 ; SSE-NEXT: movaps %xmm2, %xmm8
864 ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm3[1]
865 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm12[2,3]
866 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,1],xmm1[2,0]
867 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
868 ; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm0[2],xmm13[3],xmm0[3]
869 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,3],xmm2[0,2]
870 ; SSE-NEXT: movaps 48(%rdx), %xmm3
871 ; SSE-NEXT: movaps 48(%rcx), %xmm10
872 ; SSE-NEXT: movaps %xmm3, %xmm5
873 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1]
874 ; SSE-NEXT: movaps 48(%rdi), %xmm2
875 ; SSE-NEXT: movaps 48(%rsi), %xmm9
876 ; SSE-NEXT: movaps %xmm2, %xmm4
877 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1]
878 ; SSE-NEXT: movaps 48(%r8), %xmm1
879 ; SSE-NEXT: movaps 48(%r9), %xmm7
880 ; SSE-NEXT: movaps %xmm7, %xmm6
881 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm1[0]
882 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm4[2,3]
883 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
884 ; SSE-NEXT: movaps %xmm1, %xmm0
885 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm7[1,1]
886 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm0[0,2]
887 ; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm9[2],xmm2[3],xmm9[3]
888 ; SSE-NEXT: movaps %xmm10, %xmm0
889 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
890 ; SSE-NEXT: movaps %xmm1, %xmm9
891 ; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm7[1]
892 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm2[2,3]
893 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
894 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm7[3,3]
895 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
896 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm1[0,2]
897 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
898 ; SSE-NEXT: movaps %xmm3, 368(%rax)
899 ; SSE-NEXT: movaps %xmm9, 352(%rax)
900 ; SSE-NEXT: movaps %xmm2, 336(%rax)
901 ; SSE-NEXT: movaps %xmm5, 320(%rax)
902 ; SSE-NEXT: movaps %xmm6, 304(%rax)
903 ; SSE-NEXT: movaps %xmm4, 288(%rax)
904 ; SSE-NEXT: movaps %xmm13, 272(%rax)
905 ; SSE-NEXT: movaps %xmm8, 256(%rax)
906 ; SSE-NEXT: movaps %xmm12, 240(%rax)
907 ; SSE-NEXT: movaps %xmm14, 224(%rax)
908 ; SSE-NEXT: movaps %xmm11, 208(%rax)
909 ; SSE-NEXT: movaps %xmm15, 192(%rax)
910 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
911 ; SSE-NEXT: movaps %xmm0, 176(%rax)
912 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
913 ; SSE-NEXT: movaps %xmm0, 160(%rax)
914 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
915 ; SSE-NEXT: movaps %xmm0, 144(%rax)
916 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
917 ; SSE-NEXT: movaps %xmm0, 128(%rax)
918 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
919 ; SSE-NEXT: movaps %xmm0, 112(%rax)
920 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
921 ; SSE-NEXT: movaps %xmm0, 96(%rax)
922 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
923 ; SSE-NEXT: movaps %xmm0, 80(%rax)
924 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
925 ; SSE-NEXT: movaps %xmm0, 64(%rax)
926 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
927 ; SSE-NEXT: movaps %xmm0, 48(%rax)
928 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
929 ; SSE-NEXT: movaps %xmm0, 32(%rax)
930 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
931 ; SSE-NEXT: movaps %xmm0, 16(%rax)
932 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
933 ; SSE-NEXT: movaps %xmm0, (%rax)
934 ; SSE-NEXT: addq $72, %rsp
935 ; SSE-NEXT: retq
936 ;
937 ; AVX1-ONLY-LABEL: store_i32_stride6_vf16:
938 ; AVX1-ONLY: # %bb.0:
939 ; AVX1-ONLY-NEXT: subq $104, %rsp
940 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm5
941 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %ymm13
942 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %ymm7
943 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %ymm9
944 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %ymm11
945 ; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
946 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm8
947 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %xmm3
948 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
949 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm6
950 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %xmm10
951 ; AVX1-ONLY-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
952 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm6[1,2],xmm8[1,2]
953 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
954 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
955 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm1
956 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
957 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm2
958 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
959 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
960 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm1
961 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
962 ; AVX1-ONLY-NEXT: vbroadcastss 4(%r8), %xmm1
963 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
964 ; AVX1-ONLY-NEXT: vbroadcastss 4(%r9), %ymm1
965 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
966 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
967 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm5[0],ymm13[0],ymm5[1],ymm13[1],ymm5[4],ymm13[4],ymm5[5],ymm13[5]
968 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
969 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm9[0],ymm7[0],ymm9[2],ymm7[2]
970 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
971 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
972 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
973 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
974 ; AVX1-ONLY-NEXT: vbroadcastss 48(%r9), %ymm1
975 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
976 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
977 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm10[1,2],xmm3[1,2]
978 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
979 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
980 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm3
981 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm2
982 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm15 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
983 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm11
984 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
985 ; AVX1-ONLY-NEXT: vbroadcastss 36(%r8), %xmm11
986 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm11[2,3],ymm0[4,5,6,7]
987 ; AVX1-ONLY-NEXT: vbroadcastss 36(%r9), %ymm11
988 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm11[3],ymm0[4,5,6,7]
989 ; AVX1-ONLY-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
990 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm1
991 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm0
992 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm11 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
993 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm11[2,3,2,3]
994 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm11
995 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm12
996 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
997 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
998 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm10, %xmm10
999 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm14[0,1],ymm10[2,3],ymm14[4,5,6,7]
1000 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm14
1001 ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1002 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm14[4,5],ymm10[6,7]
1003 ; AVX1-ONLY-NEXT: vbroadcastss 16(%r9), %ymm14
1004 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm10[0,1,2,3,4],ymm14[5],ymm10[6,7]
1005 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm13 = ymm5[2],ymm13[2],ymm5[3],ymm13[3],ymm5[6],ymm13[6],ymm5[7],ymm13[7]
1006 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm7[1,2],ymm9[1,2],ymm7[5,6],ymm9[5,6]
1007 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
1008 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,2,1,3,4,6,5,7]
1009 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm13[4,5],ymm5[6,7]
1010 ; AVX1-ONLY-NEXT: vbroadcastss 52(%r8), %xmm10
1011 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
1012 ; AVX1-ONLY-NEXT: vbroadcastss 52(%r9), %ymm10
1013 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm5[0,1,2],ymm10[3],ymm5[4,5,6,7]
1014 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm5 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
1015 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm11[1,2],ymm12[1,2],ymm11[5,6],ymm12[5,6]
1016 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
1017 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
1018 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5],ymm0[6,7]
1019 ; AVX1-ONLY-NEXT: vbroadcastss 20(%r8), %xmm1
1020 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
1021 ; AVX1-ONLY-NEXT: vbroadcastss 20(%r9), %ymm1
1022 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
1023 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm6[2],xmm8[2],xmm6[3],xmm8[3]
1024 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
1025 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
1026 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm4 = mem[2,1,3,3]
1027 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
1028 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5],ymm4[6,7]
1029 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm4
1030 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm4[0,2,2,3]
1031 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
1032 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm4[1],ymm0[2,3,4,5,6],ymm4[7]
1033 ; AVX1-ONLY-NEXT: vbroadcastss 32(%rcx), %xmm0
1034 ; AVX1-ONLY-NEXT: vbroadcastss 32(%rdx), %xmm6
1035 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
1036 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
1037 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm3
1038 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
1039 ; AVX1-ONLY-NEXT: vinsertf128 $1, 32(%r8), %ymm2, %ymm2
1040 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
1041 ; AVX1-ONLY-NEXT: vbroadcastss 32(%r9), %ymm2
1042 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
1043 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm9[3,0],ymm7[3,0],ymm9[7,4],ymm7[7,4]
1044 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
1045 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3]
1046 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
1047 ; AVX1-ONLY-NEXT: # ymm3 = mem[2,3,2,3]
1048 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,1,3,3,6,5,7,7]
1049 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3,4,5],ymm3[6,7]
1050 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = mem[2,3,2,3]
1051 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
1052 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4,5,6],ymm3[7]
1053 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1054 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
1055 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
1056 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,3,2,3]
1057 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm15, %ymm3
1058 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm6 = mem[2,1,3,3]
1059 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm6, %ymm6
1060 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3,4,5],ymm6[6,7]
1061 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %xmm6
1062 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm6[0,2,2,3]
1063 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6
1064 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm6[1],ymm3[2,3,4,5,6],ymm6[7]
1065 ; AVX1-ONLY-NEXT: vbroadcastss (%rcx), %xmm6
1066 ; AVX1-ONLY-NEXT: vbroadcastss (%rdx), %xmm7
1067 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
1068 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1069 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
1070 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[0],mem[0],xmm7[1],mem[1]
1071 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm8
1072 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7]
1073 ; AVX1-ONLY-NEXT: vinsertf128 $1, (%r8), %ymm7, %ymm7
1074 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7]
1075 ; AVX1-ONLY-NEXT: vbroadcastss (%r9), %ymm7
1076 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
1077 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm12[3,0],ymm11[3,0],ymm12[7,4],ymm11[7,4]
1078 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,0,2,3,6,4,6,7]
1079 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3]
1080 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
1081 ; AVX1-ONLY-NEXT: # ymm7 = mem[2,3,2,3]
1082 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,1,3,3,6,5,7,7]
1083 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3,4,5],ymm7[6,7]
1084 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm7 = mem[2,3,2,3]
1085 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,2,2,3,4,6,6,7]
1086 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm7[1],ymm5[2,3,4,5,6],ymm7[7]
1087 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
1088 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 160(%rax)
1089 ; AVX1-ONLY-NEXT: vmovaps %ymm6, (%rax)
1090 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 256(%rax)
1091 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rax)
1092 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 192(%rax)
1093 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rax)
1094 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 128(%rax)
1095 ; AVX1-ONLY-NEXT: vmovaps %ymm10, 320(%rax)
1096 ; AVX1-ONLY-NEXT: vmovaps %ymm14, 96(%rax)
1097 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
1098 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
1099 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1100 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rax)
1101 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1102 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
1103 ; AVX1-ONLY-NEXT: addq $104, %rsp
1104 ; AVX1-ONLY-NEXT: vzeroupper
1105 ; AVX1-ONLY-NEXT: retq
1106 ;
1107 ; AVX2-SLOW-LABEL: store_i32_stride6_vf16:
1108 ; AVX2-SLOW: # %bb.0:
1109 ; AVX2-SLOW-NEXT: subq $200, %rsp
1110 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm12
1111 ; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %xmm1
1112 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
1113 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm2
1114 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm12[2],xmm0[3],xmm12[3]
1115 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill
1116 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
1117 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm4
1118 ; AVX2-SLOW-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1119 ; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %xmm7
1120 ; AVX2-SLOW-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1121 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,2,2,3]
1122 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm5
1123 ; AVX2-SLOW-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1124 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %xmm8
1125 ; AVX2-SLOW-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1126 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,3]
1127 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
1128 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
1129 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
1130 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm13
1131 ; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %xmm6
1132 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm13[0],zero,xmm13[1],zero
1133 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
1134 ; AVX2-SLOW-NEXT: vpbroadcastd 4(%r9), %ymm4
1135 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
1136 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1137 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
1138 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1139 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
1140 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[1,2,2,3]
1141 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm8[1,2,2,3]
1142 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
1143 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
1144 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
1145 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm6[0],zero,xmm6[1],zero
1146 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
1147 ; AVX2-SLOW-NEXT: vpbroadcastd 36(%r9), %ymm4
1148 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
1149 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1150 ; AVX2-SLOW-NEXT: vpbroadcastd 32(%rcx), %xmm3
1151 ; AVX2-SLOW-NEXT: vpbroadcastd 32(%rdx), %xmm4
1152 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
1153 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %ymm9
1154 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1155 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
1156 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
1157 ; AVX2-SLOW-NEXT: vpbroadcastq %xmm6, %ymm2
1158 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
1159 ; AVX2-SLOW-NEXT: vmovdqa 32(%r9), %xmm15
1160 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm15, %ymm2
1161 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
1162 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1163 ; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %ymm14
1164 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm14[0,1,2,2,4,5,6,6]
1165 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm9[1,1,2,3,5,5,6,7]
1166 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
1167 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
1168 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm10
1169 ; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %ymm8
1170 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm4 = ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[6],ymm8[6],ymm10[7],ymm8[7]
1171 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5],ymm1[6,7]
1172 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
1173 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
1174 ; AVX2-SLOW-NEXT: vpbroadcastd 52(%r9), %ymm3
1175 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5,6,7]
1176 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1177 ; AVX2-SLOW-NEXT: vpbroadcastd (%rcx), %xmm1
1178 ; AVX2-SLOW-NEXT: vpbroadcastd (%rdx), %xmm3
1179 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1180 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1]
1181 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
1182 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
1183 ; AVX2-SLOW-NEXT: vpbroadcastq %xmm13, %ymm1
1184 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
1185 ; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm1
1186 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm1, %ymm2
1187 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
1188 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1189 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm2
1190 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm0
1191 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[0,1,2,2,4,5,6,6]
1192 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[1,1,2,3,5,5,6,7]
1193 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2],ymm3[3],ymm5[4],ymm3[5],ymm5[6],ymm3[7]
1194 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm12 = ymm3[2,1,2,3]
1195 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm7
1196 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm5
1197 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[6],ymm5[6],ymm7[7],ymm5[7]
1198 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm3[4,5],ymm12[6,7]
1199 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm11 = mem[0],zero,mem[1],zero
1200 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3],ymm12[4,5,6,7]
1201 ; AVX2-SLOW-NEXT: vpbroadcastd 20(%r9), %ymm12
1202 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3],ymm11[4,5,6,7]
1203 ; AVX2-SLOW-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1204 ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1205 ; AVX2-SLOW-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
1206 ; AVX2-SLOW-NEXT: # xmm11 = xmm11[2],mem[2],xmm11[3],mem[3]
1207 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[2,3,2,3]
1208 ; AVX2-SLOW-NEXT: vmovdqu (%rsp), %ymm12 # 32-byte Reload
1209 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11
1210 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,2,3,3]
1211 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,1,2,1]
1212 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0,1],ymm11[2,3,4,5],ymm13[6,7]
1213 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
1214 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
1215 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0],ymm1[1],ymm11[2,3,4,5,6],ymm1[7]
1216 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm11 = ymm9[2],ymm14[2],ymm9[3],ymm14[3],ymm9[6],ymm14[6],ymm9[7],ymm14[7]
1217 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[2,3,2,3,6,7,6,7]
1218 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm11[2,3]
1219 ; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %ymm11
1220 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm13 = ymm11[2,1,3,3,6,5,7,7]
1221 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
1222 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm13[0,1],ymm4[2,3,4,5],ymm13[6,7]
1223 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm13 = mem[0,2,2,3,4,6,6,7]
1224 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
1225 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm13[1],ymm4[2,3,4,5,6],ymm13[7]
1226 ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
1227 ; AVX2-SLOW-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm13 # 16-byte Folded Reload
1228 ; AVX2-SLOW-NEXT: # xmm13 = xmm12[2],mem[2],xmm12[3],mem[3]
1229 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,3,2,3]
1230 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
1231 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm13, %ymm12, %ymm13
1232 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,2,3,3]
1233 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
1234 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm13[2,3,4,5],ymm6[6,7]
1235 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm15[2,2,3,3]
1236 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,1,2,1]
1237 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm13[1],ymm6[2,3,4,5,6],ymm13[7]
1238 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm9 = ymm9[0],ymm14[0],ymm9[1],ymm14[1],ymm9[4],ymm14[4],ymm9[5],ymm14[5]
1239 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[4],ymm8[4],ymm10[5],ymm8[5]
1240 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
1241 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
1242 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
1243 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5],ymm8[6,7]
1244 ; AVX2-SLOW-NEXT: vpbroadcastd 48(%r9), %ymm9
1245 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
1246 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm9 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
1247 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,3,2,3,6,7,6,7]
1248 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm3[2,3],ymm9[2,3]
1249 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm9
1250 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm10 = ymm9[2,1,3,3,6,5,7,7]
1251 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
1252 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0,1],ymm3[2,3,4,5],ymm10[6,7]
1253 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm10 = mem[0,2,2,3,4,6,6,7]
1254 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
1255 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm10[1],ymm3[2,3,4,5,6],ymm10[7]
1256 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
1257 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm2 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[4],ymm5[4],ymm7[5],ymm5[5]
1258 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
1259 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
1260 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
1261 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5],ymm0[6,7]
1262 ; AVX2-SLOW-NEXT: vpbroadcastd 16(%r9), %ymm2
1263 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
1264 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1265 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 96(%rax)
1266 ; AVX2-SLOW-NEXT: vmovdqa %ymm3, 160(%rax)
1267 ; AVX2-SLOW-NEXT: vmovdqa %ymm8, 288(%rax)
1268 ; AVX2-SLOW-NEXT: vmovdqa %ymm6, 256(%rax)
1269 ; AVX2-SLOW-NEXT: vmovdqa %ymm4, 352(%rax)
1270 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, 64(%rax)
1271 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1272 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rax)
1273 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1274 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
1275 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1276 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 320(%rax)
1277 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1278 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 192(%rax)
1279 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1280 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax)
1281 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1282 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
1283 ; AVX2-SLOW-NEXT: addq $200, %rsp
1284 ; AVX2-SLOW-NEXT: vzeroupper
1285 ; AVX2-SLOW-NEXT: retq
1286 ;
1287 ; AVX2-FAST-LABEL: store_i32_stride6_vf16:
1288 ; AVX2-FAST: # %bb.0:
1289 ; AVX2-FAST-NEXT: subq $232, %rsp
1290 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm12
1291 ; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %xmm1
1292 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm15
1293 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %xmm2
1294 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm15[2],xmm12[2],xmm15[3],xmm12[3]
1295 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm0
1296 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %xmm8
1297 ; AVX2-FAST-NEXT: vmovdqa 32(%rcx), %xmm6
1298 ; AVX2-FAST-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1299 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm8[1,2,2,3]
1300 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm9
1301 ; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %xmm10
1302 ; AVX2-FAST-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1303 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm9[1,2,2,3]
1304 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
1305 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
1306 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5],ymm3[6,7]
1307 ; AVX2-FAST-NEXT: vmovdqa (%r8), %xmm0
1308 ; AVX2-FAST-NEXT: vmovdqa 32(%r8), %xmm4
1309 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
1310 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3],ymm3[4,5,6,7]
1311 ; AVX2-FAST-NEXT: vpbroadcastd 4(%r9), %ymm5
1312 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5,6,7]
1313 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1314 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
1315 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1316 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
1317 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm6[1,2,2,3]
1318 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[1,2,2,3]
1319 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
1320 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
1321 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5],ymm5[6,7]
1322 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
1323 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3],ymm3[4,5,6,7]
1324 ; AVX2-FAST-NEXT: vpbroadcastd 36(%r9), %ymm5
1325 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5,6,7]
1326 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1327 ; AVX2-FAST-NEXT: vpbroadcastd 32(%rcx), %xmm3
1328 ; AVX2-FAST-NEXT: vpbroadcastd 32(%rdx), %xmm5
1329 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
1330 ; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %ymm10
1331 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1332 ; AVX2-FAST-NEXT: vmovdqa 32(%rcx), %ymm11
1333 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
1334 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
1335 ; AVX2-FAST-NEXT: vpbroadcastq %xmm4, %ymm2
1336 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
1337 ; AVX2-FAST-NEXT: vpbroadcastd 32(%r9), %ymm2
1338 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
1339 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1340 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm1 = ymm11[0,1,2,2,4,5,6,6]
1341 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm2 = ymm10[1,1,2,3,5,5,6,7]
1342 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
1343 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm2
1344 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1345 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
1346 ; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %ymm14
1347 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm2[2],ymm14[2],ymm2[3],ymm14[3],ymm2[6],ymm14[6],ymm2[7],ymm14[7]
1348 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
1349 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
1350 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
1351 ; AVX2-FAST-NEXT: vpbroadcastd 52(%r9), %ymm3
1352 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5,6,7]
1353 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1354 ; AVX2-FAST-NEXT: vpbroadcastd (%rcx), %xmm1
1355 ; AVX2-FAST-NEXT: vpbroadcastd (%rdx), %xmm3
1356 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1357 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm15[0],xmm12[0],xmm15[1],xmm12[1]
1358 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
1359 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3],ymm3[4,5,6,7]
1360 ; AVX2-FAST-NEXT: vpbroadcastq %xmm0, %ymm0
1361 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
1362 ; AVX2-FAST-NEXT: vpbroadcastd (%r9), %ymm1
1363 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
1364 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1365 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm5
1366 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %ymm1
1367 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm3 = ymm1[0,1,2,2,4,5,6,6]
1368 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm4 = ymm5[1,1,2,3,5,5,6,7]
1369 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
1370 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm15 = ymm3[2,1,2,3]
1371 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm13
1372 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %ymm4
1373 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm13[2],ymm4[2],ymm13[3],ymm4[3],ymm13[6],ymm4[6],ymm13[7],ymm4[7]
1374 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm3[4,5],ymm15[6,7]
1375 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm12 = mem[0],zero,mem[1],zero
1376 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm15[0,1],ymm12[2,3],ymm15[4,5,6,7]
1377 ; AVX2-FAST-NEXT: vpbroadcastd 20(%r9), %ymm15
1378 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0,1,2],ymm15[3],ymm12[4,5,6,7]
1379 ; AVX2-FAST-NEXT: vmovdqu %ymm6, (%rsp) # 32-byte Spill
1380 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
1381 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
1382 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm8, %ymm7, %ymm7
1383 ; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm8
1384 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [2,2,3,3,2,2,3,3]
1385 ; AVX2-FAST-NEXT: # ymm12 = mem[0,1,0,1]
1386 ; AVX2-FAST-NEXT: vpermd %ymm8, %ymm12, %ymm9
1387 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm7[2,3,4,5],ymm9[6,7]
1388 ; AVX2-FAST-NEXT: vmovdqa (%r9), %ymm9
1389 ; AVX2-FAST-NEXT: vpermd %ymm9, %ymm12, %ymm15
1390 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0],ymm15[1],ymm7[2,3,4,5,6],ymm15[7]
1391 ; AVX2-FAST-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1392 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm15 = ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[6],ymm11[6],ymm10[7],ymm11[7]
1393 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[2,3,2,3,6,7,6,7]
1394 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm2[2,3],ymm15[2,3]
1395 ; AVX2-FAST-NEXT: vmovdqa 32(%r8), %ymm15
1396 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [6,5,3,3,6,5,7,7]
1397 ; AVX2-FAST-NEXT: vpermd %ymm15, %ymm6, %ymm7
1398 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3,4,5],ymm7[6,7]
1399 ; AVX2-FAST-NEXT: vmovdqa 32(%r9), %ymm7
1400 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm0 = [4,6,2,3,4,6,6,7]
1401 ; AVX2-FAST-NEXT: vpermd %ymm7, %ymm0, %ymm6
1402 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2,3,4,5,6],ymm6[7]
1403 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1404 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1405 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm6 # 16-byte Folded Reload
1406 ; AVX2-FAST-NEXT: # xmm6 = xmm2[2],mem[2],xmm2[3],mem[3]
1407 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
1408 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1409 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm6, %ymm2, %ymm6
1410 ; AVX2-FAST-NEXT: vpermd %ymm15, %ymm12, %ymm2
1411 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm6[2,3,4,5],ymm2[6,7]
1412 ; AVX2-FAST-NEXT: vpermd %ymm7, %ymm12, %ymm6
1413 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2,3,4,5,6],ymm6[7]
1414 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm6 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[4],ymm11[4],ymm10[5],ymm11[5]
1415 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
1416 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm7 = ymm7[0],ymm14[0],ymm7[1],ymm14[1],ymm7[4],ymm14[4],ymm7[5],ymm14[5]
1417 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
1418 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
1419 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
1420 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm15[4,5],ymm6[6,7]
1421 ; AVX2-FAST-NEXT: vpbroadcastd 48(%r9), %ymm7
1422 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
1423 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm7 = ymm5[2],ymm1[2],ymm5[3],ymm1[3],ymm5[6],ymm1[6],ymm5[7],ymm1[7]
1424 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,3,6,7,6,7]
1425 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm3[2,3],ymm7[2,3]
1426 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [6,5,3,3,6,5,7,7]
1427 ; AVX2-FAST-NEXT: vpermd %ymm8, %ymm7, %ymm7
1428 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0,1],ymm3[2,3,4,5],ymm7[6,7]
1429 ; AVX2-FAST-NEXT: vpermd %ymm9, %ymm0, %ymm7
1430 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2,3,4,5,6],ymm7[7]
1431 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm5[0],ymm1[0],ymm5[1],ymm1[1],ymm5[4],ymm1[4],ymm5[5],ymm1[5]
1432 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm13[0],ymm4[0],ymm13[1],ymm4[1],ymm13[4],ymm4[4],ymm13[5],ymm4[5]
1433 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
1434 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
1435 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
1436 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5],ymm0[6,7]
1437 ; AVX2-FAST-NEXT: vpbroadcastd 16(%r9), %ymm1
1438 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
1439 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
1440 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 96(%rax)
1441 ; AVX2-FAST-NEXT: vmovdqa %ymm3, 160(%rax)
1442 ; AVX2-FAST-NEXT: vmovdqa %ymm6, 288(%rax)
1443 ; AVX2-FAST-NEXT: vmovdqa %ymm2, 256(%rax)
1444 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1445 ; AVX2-FAST-NEXT: vmovaps %ymm0, 352(%rax)
1446 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1447 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
1448 ; AVX2-FAST-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
1449 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rax)
1450 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1451 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
1452 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1453 ; AVX2-FAST-NEXT: vmovaps %ymm0, 320(%rax)
1454 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1455 ; AVX2-FAST-NEXT: vmovaps %ymm0, 192(%rax)
1456 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1457 ; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rax)
1458 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1459 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
1460 ; AVX2-FAST-NEXT: addq $232, %rsp
1461 ; AVX2-FAST-NEXT: vzeroupper
1462 ; AVX2-FAST-NEXT: retq
1463 ;
1464 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride6_vf16:
1465 ; AVX2-FAST-PERLANE: # %bb.0:
1466 ; AVX2-FAST-PERLANE-NEXT: subq $200, %rsp
1467 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm12
1468 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %xmm1
1469 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
1470 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %xmm2
1471 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm12[2],xmm0[3],xmm12[3]
1472 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill
1473 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
1474 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm4
1475 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1476 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %xmm7
1477 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1478 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,2,2,3]
1479 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm5
1480 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1481 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %xmm8
1482 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1483 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,3]
1484 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
1485 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
1486 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
1487 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm13
1488 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %xmm6
1489 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm13[0],zero,xmm13[1],zero
1490 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
1491 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 4(%r9), %ymm4
1492 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
1493 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1494 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
1495 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1496 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
1497 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[1,2,2,3]
1498 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm8[1,2,2,3]
1499 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
1500 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
1501 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
1502 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm6[0],zero,xmm6[1],zero
1503 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
1504 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 36(%r9), %ymm4
1505 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
1506 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1507 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 32(%rcx), %xmm3
1508 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 32(%rdx), %xmm4
1509 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
1510 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %ymm9
1511 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1512 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
1513 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
1514 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm6, %ymm2
1515 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
1516 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r9), %xmm15
1517 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm15, %ymm2
1518 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
1519 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1520 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %ymm14
1521 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm14[0,1,2,2,4,5,6,6]
1522 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm2 = ymm9[1,1,2,3,5,5,6,7]
1523 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
1524 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
1525 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm10
1526 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %ymm8
1527 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm4 = ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[6],ymm8[6],ymm10[7],ymm8[7]
1528 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5],ymm1[6,7]
1529 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
1530 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
1531 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 52(%r9), %ymm3
1532 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5,6,7]
1533 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1534 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd (%rcx), %xmm1
1535 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd (%rdx), %xmm3
1536 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1537 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1]
1538 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
1539 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
1540 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm13, %ymm1
1541 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
1542 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r9), %xmm1
1543 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm1, %ymm2
1544 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
1545 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1546 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm2
1547 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %ymm0
1548 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[0,1,2,2,4,5,6,6]
1549 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[1,1,2,3,5,5,6,7]
1550 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2],ymm3[3],ymm5[4],ymm3[5],ymm5[6],ymm3[7]
1551 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm12 = ymm3[2,1,2,3]
1552 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm7
1553 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm5
1554 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[6],ymm5[6],ymm7[7],ymm5[7]
1555 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm3[4,5],ymm12[6,7]
1556 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm11 = mem[0],zero,mem[1],zero
1557 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3],ymm12[4,5,6,7]
1558 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 20(%r9), %ymm12
1559 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3],ymm11[4,5,6,7]
1560 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1561 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1562 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
1563 ; AVX2-FAST-PERLANE-NEXT: # xmm11 = xmm11[2],mem[2],xmm11[3],mem[3]
1564 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[2,3,2,3]
1565 ; AVX2-FAST-PERLANE-NEXT: vmovdqu (%rsp), %ymm12 # 32-byte Reload
1566 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11
1567 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,2,3,3]
1568 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,1,2,1]
1569 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0,1],ymm11[2,3,4,5],ymm13[6,7]
1570 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
1571 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
1572 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0],ymm1[1],ymm11[2,3,4,5,6],ymm1[7]
1573 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm11 = ymm9[2],ymm14[2],ymm9[3],ymm14[3],ymm9[6],ymm14[6],ymm9[7],ymm14[7]
1574 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[2,3,2,3,6,7,6,7]
1575 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm11[2,3]
1576 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %ymm11
1577 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm13 = ymm11[2,1,3,3,6,5,7,7]
1578 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
1579 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm13[0,1],ymm4[2,3,4,5],ymm13[6,7]
1580 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm13 = mem[0,2,2,3,4,6,6,7]
1581 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
1582 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm13[1],ymm4[2,3,4,5,6],ymm13[7]
1583 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
1584 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm13 # 16-byte Folded Reload
1585 ; AVX2-FAST-PERLANE-NEXT: # xmm13 = xmm12[2],mem[2],xmm12[3],mem[3]
1586 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,3,2,3]
1587 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
1588 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm13, %ymm12, %ymm13
1589 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,2,3,3]
1590 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
1591 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm13[2,3,4,5],ymm6[6,7]
1592 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm13 = xmm15[2,2,3,3]
1593 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,1,2,1]
1594 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm13[1],ymm6[2,3,4,5,6],ymm13[7]
1595 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm9 = ymm9[0],ymm14[0],ymm9[1],ymm14[1],ymm9[4],ymm14[4],ymm9[5],ymm14[5]
1596 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[4],ymm8[4],ymm10[5],ymm8[5]
1597 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
1598 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
1599 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
1600 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5],ymm8[6,7]
1601 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 48(%r9), %ymm9
1602 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
1603 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm9 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
1604 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,3,2,3,6,7,6,7]
1605 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm3[2,3],ymm9[2,3]
1606 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %ymm9
1607 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm10 = ymm9[2,1,3,3,6,5,7,7]
1608 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
1609 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm10[0,1],ymm3[2,3,4,5],ymm10[6,7]
1610 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm10 = mem[0,2,2,3,4,6,6,7]
1611 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
1612 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm10[1],ymm3[2,3,4,5,6],ymm10[7]
1613 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
1614 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm2 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[4],ymm5[4],ymm7[5],ymm5[5]
1615 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
1616 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
1617 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
1618 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5],ymm0[6,7]
1619 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 16(%r9), %ymm2
1620 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
1621 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1622 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 96(%rax)
1623 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, 160(%rax)
1624 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm8, 288(%rax)
1625 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm6, 256(%rax)
1626 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm4, 352(%rax)
1627 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 64(%rax)
1628 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1629 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%rax)
1630 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1631 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
1632 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1633 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 320(%rax)
1634 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1635 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 192(%rax)
1636 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1637 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax)
1638 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1639 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
1640 ; AVX2-FAST-PERLANE-NEXT: addq $200, %rsp
1641 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
1642 ; AVX2-FAST-PERLANE-NEXT: retq
1643 ;
1644 ; AVX512F-SLOW-LABEL: store_i32_stride6_vf16:
1645 ; AVX512F-SLOW: # %bb.0:
1646 ; AVX512F-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1647 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rdi), %zmm2
1648 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rsi), %zmm3
1649 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rdx), %zmm4
1650 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rcx), %zmm5
1651 ; AVX512F-SLOW-NEXT: vmovdqa64 (%r8), %zmm1
1652 ; AVX512F-SLOW-NEXT: vmovdqa64 (%r9), %zmm0
1653 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
1654 ; AVX512F-SLOW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
1655 ; AVX512F-SLOW-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1656 ; AVX512F-SLOW-NEXT: vmovdqa (%rdx), %ymm7
1657 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [3,11,0,8,7,15,4,12]
1658 ; AVX512F-SLOW-NEXT: vpermi2d (%rcx), %ymm7, %ymm8
1659 ; AVX512F-SLOW-NEXT: movb $36, %cl
1660 ; AVX512F-SLOW-NEXT: kmovw %ecx, %k1
1661 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm6 {%k1} = zmm8[0,1,0,1,2,3,6,7]
1662 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
1663 ; AVX512F-SLOW-NEXT: vpermi2d %zmm1, %zmm6, %zmm7
1664 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
1665 ; AVX512F-SLOW-NEXT: vpermi2d %zmm0, %zmm7, %zmm6
1666 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,12,28>
1667 ; AVX512F-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1668 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
1669 ; AVX512F-SLOW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1670 ; AVX512F-SLOW-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1671 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1}
1672 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
1673 ; AVX512F-SLOW-NEXT: vpermi2d %zmm1, %zmm8, %zmm7
1674 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
1675 ; AVX512F-SLOW-NEXT: vpermi2d %zmm0, %zmm7, %zmm8
1676 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
1677 ; AVX512F-SLOW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1678 ; AVX512F-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1679 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
1680 ; AVX512F-SLOW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
1681 ; AVX512F-SLOW-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1682 ; AVX512F-SLOW-NEXT: movb $-110, %cl
1683 ; AVX512F-SLOW-NEXT: kmovw %ecx, %k2
1684 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm7, %zmm9 {%k2}
1685 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
1686 ; AVX512F-SLOW-NEXT: vpermi2d %zmm1, %zmm9, %zmm7
1687 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
1688 ; AVX512F-SLOW-NEXT: vpermi2d %zmm0, %zmm7, %zmm9
1689 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
1690 ; AVX512F-SLOW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1691 ; AVX512F-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1692 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
1693 ; AVX512F-SLOW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
1694 ; AVX512F-SLOW-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1695 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm7, %zmm10 {%k2}
1696 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
1697 ; AVX512F-SLOW-NEXT: vpermi2d %zmm1, %zmm10, %zmm7
1698 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
1699 ; AVX512F-SLOW-NEXT: vpermi2d %zmm0, %zmm7, %zmm10
1700 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
1701 ; AVX512F-SLOW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1702 ; AVX512F-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1703 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %ymm11
1704 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm11 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
1705 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm7 {%k1} = zmm11[2,3,2,3,2,3,2,3]
1706 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
1707 ; AVX512F-SLOW-NEXT: vpermi2d %zmm1, %zmm7, %zmm11
1708 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
1709 ; AVX512F-SLOW-NEXT: vpermi2d %zmm0, %zmm11, %zmm7
1710 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
1711 ; AVX512F-SLOW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
1712 ; AVX512F-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm11
1713 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm2[2],zmm3[2],zmm2[3],zmm3[3],zmm2[6],zmm3[6],zmm2[7],zmm3[7],zmm2[10],zmm3[10],zmm2[11],zmm3[11],zmm2[14],zmm3[14],zmm2[15],zmm3[15]
1714 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm11 {%k1} = zmm2[6,7,6,7,6,7,6,7]
1715 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
1716 ; AVX512F-SLOW-NEXT: vpermi2d %zmm1, %zmm11, %zmm2
1717 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
1718 ; AVX512F-SLOW-NEXT: vpermi2d %zmm0, %zmm2, %zmm1
1719 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm10, (%rax)
1720 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm9, 192(%rax)
1721 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm1, 320(%rax)
1722 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm8, 256(%rax)
1723 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm7, 128(%rax)
1724 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm6, 64(%rax)
1725 ; AVX512F-SLOW-NEXT: vzeroupper
1726 ; AVX512F-SLOW-NEXT: retq
1727 ;
1728 ; AVX512F-FAST-LABEL: store_i32_stride6_vf16:
1729 ; AVX512F-FAST: # %bb.0:
1730 ; AVX512F-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
1731 ; AVX512F-FAST-NEXT: vmovdqa64 (%rdi), %zmm3
1732 ; AVX512F-FAST-NEXT: vmovdqa64 (%rsi), %zmm5
1733 ; AVX512F-FAST-NEXT: vmovdqa64 (%rdx), %zmm2
1734 ; AVX512F-FAST-NEXT: vmovdqa64 (%rcx), %zmm4
1735 ; AVX512F-FAST-NEXT: vmovdqa64 (%r8), %zmm1
1736 ; AVX512F-FAST-NEXT: vmovdqa64 (%r9), %zmm0
1737 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
1738 ; AVX512F-FAST-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
1739 ; AVX512F-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm6
1740 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
1741 ; AVX512F-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1742 ; AVX512F-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
1743 ; AVX512F-FAST-NEXT: movb $-110, %cl
1744 ; AVX512F-FAST-NEXT: kmovw %ecx, %k2
1745 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm6, %zmm7 {%k2}
1746 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm8 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
1747 ; AVX512F-FAST-NEXT: vpermi2d %zmm1, %zmm7, %zmm8
1748 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
1749 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm8, %zmm6
1750 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,3,19,u,u,7,23,4,20,u,u,u,u>
1751 ; AVX512F-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm7
1752 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
1753 ; AVX512F-FAST-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1754 ; AVX512F-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm8
1755 ; AVX512F-FAST-NEXT: movb $36, %cl
1756 ; AVX512F-FAST-NEXT: kmovw %ecx, %k1
1757 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1}
1758 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
1759 ; AVX512F-FAST-NEXT: vpermi2d %zmm1, %zmm8, %zmm7
1760 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
1761 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm7, %zmm8
1762 ; AVX512F-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
1763 ; AVX512F-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
1764 ; AVX512F-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
1765 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
1766 ; AVX512F-FAST-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
1767 ; AVX512F-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm9
1768 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm9 {%k1}
1769 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
1770 ; AVX512F-FAST-NEXT: vpermi2d %zmm1, %zmm9, %zmm7
1771 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
1772 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm7, %zmm9
1773 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
1774 ; AVX512F-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1775 ; AVX512F-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm7
1776 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
1777 ; AVX512F-FAST-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
1778 ; AVX512F-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm10
1779 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm10 {%k2}
1780 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
1781 ; AVX512F-FAST-NEXT: vpermi2d %zmm1, %zmm10, %zmm7
1782 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
1783 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm7, %zmm10
1784 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,12,28>
1785 ; AVX512F-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm7
1786 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
1787 ; AVX512F-FAST-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
1788 ; AVX512F-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
1789 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm11 {%k1}
1790 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
1791 ; AVX512F-FAST-NEXT: vpermi2d %zmm1, %zmm11, %zmm7
1792 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
1793 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm7, %zmm11
1794 ; AVX512F-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
1795 ; AVX512F-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
1796 ; AVX512F-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
1797 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
1798 ; AVX512F-FAST-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
1799 ; AVX512F-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm3
1800 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm3 {%k1}
1801 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
1802 ; AVX512F-FAST-NEXT: vpermi2d %zmm1, %zmm3, %zmm2
1803 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
1804 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm2, %zmm1
1805 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm1, 320(%rax)
1806 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm11, 256(%rax)
1807 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm10, 192(%rax)
1808 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm9, 128(%rax)
1809 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm8, 64(%rax)
1810 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm6, (%rax)
1811 ; AVX512F-FAST-NEXT: vzeroupper
1812 ; AVX512F-FAST-NEXT: retq
1814 ; AVX512BW-SLOW-LABEL: store_i32_stride6_vf16:
1815 ; AVX512BW-SLOW: # %bb.0:
1816 ; AVX512BW-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1817 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm2
1818 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rsi), %zmm3
1819 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdx), %zmm4
1820 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rcx), %zmm5
1821 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%r8), %zmm1
1822 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%r9), %zmm0
1823 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
1824 ; AVX512BW-SLOW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
1825 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1826 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdx), %ymm7
1827 ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [3,11,0,8,7,15,4,12]
1828 ; AVX512BW-SLOW-NEXT: vpermi2d (%rcx), %ymm7, %ymm8
1829 ; AVX512BW-SLOW-NEXT: movb $36, %cl
1830 ; AVX512BW-SLOW-NEXT: kmovd %ecx, %k1
1831 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm6 {%k1} = zmm8[0,1,0,1,2,3,6,7]
1832 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
1833 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm1, %zmm6, %zmm7
1834 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
1835 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm0, %zmm7, %zmm6
1836 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,12,28>
1837 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1838 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
1839 ; AVX512BW-SLOW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1840 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm3, %zmm2, %zmm8
1841 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1}
1842 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
1843 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm1, %zmm8, %zmm7
1844 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
1845 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm0, %zmm7, %zmm8
1846 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
1847 ; AVX512BW-SLOW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1848 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1849 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
1850 ; AVX512BW-SLOW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
1851 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm3, %zmm2, %zmm9
1852 ; AVX512BW-SLOW-NEXT: movb $-110, %cl
1853 ; AVX512BW-SLOW-NEXT: kmovd %ecx, %k2
1854 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm7, %zmm9 {%k2}
1855 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
1856 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm1, %zmm9, %zmm7
1857 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
1858 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm0, %zmm7, %zmm9
1859 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
1860 ; AVX512BW-SLOW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1861 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1862 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
1863 ; AVX512BW-SLOW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
1864 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm3, %zmm2, %zmm10
1865 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm7, %zmm10 {%k2}
1866 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
1867 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm1, %zmm10, %zmm7
1868 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
1869 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm0, %zmm7, %zmm10
1870 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
1871 ; AVX512BW-SLOW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1872 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm7
1873 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm11
1874 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm11 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
1875 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm7 {%k1} = zmm11[2,3,2,3,2,3,2,3]
1876 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm11 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
1877 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm1, %zmm7, %zmm11
1878 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
1879 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm0, %zmm11, %zmm7
1880 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
1881 ; AVX512BW-SLOW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
1882 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm5, %zmm4, %zmm11
1883 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm2[2],zmm3[2],zmm2[3],zmm3[3],zmm2[6],zmm3[6],zmm2[7],zmm3[7],zmm2[10],zmm3[10],zmm2[11],zmm3[11],zmm2[14],zmm3[14],zmm2[15],zmm3[15]
1884 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm11 {%k1} = zmm2[6,7,6,7,6,7,6,7]
1885 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
1886 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm1, %zmm11, %zmm2
1887 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
1888 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm0, %zmm2, %zmm1
1889 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm10, (%rax)
1890 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm9, 192(%rax)
1891 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm1, 320(%rax)
1892 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm8, 256(%rax)
1893 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm7, 128(%rax)
1894 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm6, 64(%rax)
1895 ; AVX512BW-SLOW-NEXT: vzeroupper
1896 ; AVX512BW-SLOW-NEXT: retq
1898 ; AVX512BW-FAST-LABEL: store_i32_stride6_vf16:
1899 ; AVX512BW-FAST: # %bb.0:
1900 ; AVX512BW-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
1901 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm3
1902 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rsi), %zmm5
1903 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdx), %zmm2
1904 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rcx), %zmm4
1905 ; AVX512BW-FAST-NEXT: vmovdqa64 (%r8), %zmm1
1906 ; AVX512BW-FAST-NEXT: vmovdqa64 (%r9), %zmm0
1907 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
1908 ; AVX512BW-FAST-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
1909 ; AVX512BW-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm6
1910 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
1911 ; AVX512BW-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1912 ; AVX512BW-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
1913 ; AVX512BW-FAST-NEXT: movb $-110, %cl
1914 ; AVX512BW-FAST-NEXT: kmovd %ecx, %k2
1915 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm6, %zmm7 {%k2}
1916 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm8 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
1917 ; AVX512BW-FAST-NEXT: vpermi2d %zmm1, %zmm7, %zmm8
1918 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
1919 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm8, %zmm6
1920 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,3,19,u,u,7,23,4,20,u,u,u,u>
1921 ; AVX512BW-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm7
1922 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
1923 ; AVX512BW-FAST-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1924 ; AVX512BW-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm8
1925 ; AVX512BW-FAST-NEXT: movb $36, %cl
1926 ; AVX512BW-FAST-NEXT: kmovd %ecx, %k1
1927 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1}
1928 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
1929 ; AVX512BW-FAST-NEXT: vpermi2d %zmm1, %zmm8, %zmm7
1930 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
1931 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm7, %zmm8
1932 ; AVX512BW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
1933 ; AVX512BW-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
1934 ; AVX512BW-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
1935 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
1936 ; AVX512BW-FAST-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
1937 ; AVX512BW-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm9
1938 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm9 {%k1}
1939 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
1940 ; AVX512BW-FAST-NEXT: vpermi2d %zmm1, %zmm9, %zmm7
1941 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
1942 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm7, %zmm9
1943 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
1944 ; AVX512BW-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
1945 ; AVX512BW-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm7
1946 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
1947 ; AVX512BW-FAST-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
1948 ; AVX512BW-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm10
1949 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm10 {%k2}
1950 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
1951 ; AVX512BW-FAST-NEXT: vpermi2d %zmm1, %zmm10, %zmm7
1952 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
1953 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm7, %zmm10
1954 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,12,28>
1955 ; AVX512BW-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm7
1956 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
1957 ; AVX512BW-FAST-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
1958 ; AVX512BW-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
1959 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm11 {%k1}
1960 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
1961 ; AVX512BW-FAST-NEXT: vpermi2d %zmm1, %zmm11, %zmm7
1962 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
1963 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm7, %zmm11
1964 ; AVX512BW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
1965 ; AVX512BW-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
1966 ; AVX512BW-FAST-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
1967 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
1968 ; AVX512BW-FAST-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
1969 ; AVX512BW-FAST-NEXT: vpermi2d %zmm4, %zmm2, %zmm3
1970 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm3 {%k1}
1971 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
1972 ; AVX512BW-FAST-NEXT: vpermi2d %zmm1, %zmm3, %zmm2
1973 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
1974 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm2, %zmm1
1975 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm1, 320(%rax)
1976 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm11, 256(%rax)
1977 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm10, 192(%rax)
1978 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm9, 128(%rax)
1979 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm8, 64(%rax)
1980 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm6, (%rax)
1981 ; AVX512BW-FAST-NEXT: vzeroupper
1982 ; AVX512BW-FAST-NEXT: retq
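; Sketch of the pattern being tested (derived from the shuffles below): the six
; <16 x i32> inputs are concatenated into a 96-element vector, and the final
; shufflevector places element i of input k at output position 6*i+k, i.e. a
; stride-6 interleaved store.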
1983 %in.vec0 = load <16 x i32>, ptr %in.vecptr0, align 64
1984 %in.vec1 = load <16 x i32>, ptr %in.vecptr1, align 64
1985 %in.vec2 = load <16 x i32>, ptr %in.vecptr2, align 64
1986 %in.vec3 = load <16 x i32>, ptr %in.vecptr3, align 64
1987 %in.vec4 = load <16 x i32>, ptr %in.vecptr4, align 64
1988 %in.vec5 = load <16 x i32>, ptr %in.vecptr5, align 64
1989 %1 = shufflevector <16 x i32> %in.vec0, <16 x i32> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1990 %2 = shufflevector <16 x i32> %in.vec2, <16 x i32> %in.vec3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1991 %3 = shufflevector <16 x i32> %in.vec4, <16 x i32> %in.vec5, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1992 %4 = shufflevector <32 x i32> %1, <32 x i32> %2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1993 %5 = shufflevector <32 x i32> %3, <32 x i32> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1994 %6 = shufflevector <64 x i32> %4, <64 x i32> %5, <96 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
1995 %interleaved.vec = shufflevector <96 x i32> %6, <96 x i32> poison, <96 x i32> <i32 0, i32 16, i32 32, i32 48, i32 64, i32 80, i32 1, i32 17, i32 33, i32 49, i32 65, i32 81, i32 2, i32 18, i32 34, i32 50, i32 66, i32 82, i32 3, i32 19, i32 35, i32 51, i32 67, i32 83, i32 4, i32 20, i32 36, i32 52, i32 68, i32 84, i32 5, i32 21, i32 37, i32 53, i32 69, i32 85, i32 6, i32 22, i32 38, i32 54, i32 70, i32 86, i32 7, i32 23, i32 39, i32 55, i32 71, i32 87, i32 8, i32 24, i32 40, i32 56, i32 72, i32 88, i32 9, i32 25, i32 41, i32 57, i32 73, i32 89, i32 10, i32 26, i32 42, i32 58, i32 74, i32 90, i32 11, i32 27, i32 43, i32 59, i32 75, i32 91, i32 12, i32 28, i32 44, i32 60, i32 76, i32 92, i32 13, i32 29, i32 45, i32 61, i32 77, i32 93, i32 14, i32 30, i32 46, i32 62, i32 78, i32 94, i32 15, i32 31, i32 47, i32 63, i32 79, i32 95>
1996 store <96 x i32> %interleaved.vec, ptr %out.vec, align 64
1997 ret void
1998 }
2000 define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %out.vec) nounwind {
2001 ; SSE-LABEL: store_i32_stride6_vf32:
2002 ; SSE: # %bb.0:
2003 ; SSE-NEXT: subq $456, %rsp # imm = 0x1C8
2004 ; SSE-NEXT: movaps (%rdi), %xmm9
2005 ; SSE-NEXT: movaps 16(%rdi), %xmm10
2006 ; SSE-NEXT: movaps (%rsi), %xmm4
2007 ; SSE-NEXT: movaps 16(%rsi), %xmm0
2008 ; SSE-NEXT: movaps (%rdx), %xmm11
2009 ; SSE-NEXT: movaps 16(%rdx), %xmm12
2010 ; SSE-NEXT: movaps (%rcx), %xmm5
2011 ; SSE-NEXT: movaps 16(%rcx), %xmm1
2012 ; SSE-NEXT: movaps (%r8), %xmm6
2013 ; SSE-NEXT: movaps 16(%r8), %xmm2
2014 ; SSE-NEXT: movaps (%r9), %xmm7
2015 ; SSE-NEXT: movaps 16(%r9), %xmm3
2016 ; SSE-NEXT: movaps %xmm11, %xmm13
2017 ; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1]
2018 ; SSE-NEXT: movaps %xmm9, %xmm8
2019 ; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
2020 ; SSE-NEXT: movaps %xmm7, %xmm14
2021 ; SSE-NEXT: movlhps {{.*#+}} xmm14 = xmm14[0],xmm6[0]
2022 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0],xmm8[2,3]
2023 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2024 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm13[0]
2025 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2026 ; SSE-NEXT: movaps %xmm6, %xmm8
2027 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm7[1,1]
2028 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,3],xmm8[0,2]
2029 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2030 ; SSE-NEXT: movaps %xmm5, %xmm8
2031 ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm11[1]
2032 ; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
2033 ; SSE-NEXT: movaps %xmm6, %xmm4
2034 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm7[1]
2035 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm9[2,3]
2036 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2037 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm8[2,0]
2038 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2039 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm7[3,3]
2040 ; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm5[2],xmm11[3],xmm5[3]
2041 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,3],xmm6[0,2]
2042 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2043 ; SSE-NEXT: movaps %xmm12, %xmm5
2044 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
2045 ; SSE-NEXT: movaps %xmm10, %xmm4
2046 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
2047 ; SSE-NEXT: movaps %xmm3, %xmm6
2048 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
2049 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm4[2,3]
2050 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2051 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
2052 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2053 ; SSE-NEXT: movaps %xmm2, %xmm4
2054 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
2055 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
2056 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2057 ; SSE-NEXT: movaps %xmm1, %xmm4
2058 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm12[1]
2059 ; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm0[2],xmm10[3],xmm0[3]
2060 ; SSE-NEXT: movaps %xmm2, %xmm0
2061 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
2062 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm10[2,3]
2063 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2064 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,1],xmm4[2,0]
2065 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2066 ; SSE-NEXT: movaps 32(%rdi), %xmm5
2067 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
2068 ; SSE-NEXT: movaps 32(%rdx), %xmm6
2069 ; SSE-NEXT: unpckhps {{.*#+}} xmm12 = xmm12[2],xmm1[2],xmm12[3],xmm1[3]
2070 ; SSE-NEXT: movaps 32(%rcx), %xmm0
2071 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,3],xmm2[0,2]
2072 ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2073 ; SSE-NEXT: movaps %xmm6, %xmm7
2074 ; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
2075 ; SSE-NEXT: movaps 32(%rsi), %xmm1
2076 ; SSE-NEXT: movaps %xmm5, %xmm4
2077 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2078 ; SSE-NEXT: movaps 32(%r8), %xmm2
2079 ; SSE-NEXT: movaps 32(%r9), %xmm3
2080 ; SSE-NEXT: movaps %xmm3, %xmm8
2081 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
2082 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
2083 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2084 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm7[0]
2085 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2086 ; SSE-NEXT: movaps %xmm2, %xmm4
2087 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
2088 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,3],xmm4[0,2]
2089 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2090 ; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
2091 ; SSE-NEXT: movaps %xmm0, %xmm1
2092 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
2093 ; SSE-NEXT: movaps %xmm2, %xmm4
2094 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
2095 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[2,3]
2096 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2097 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm1[2,0]
2098 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2099 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
2100 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
2101 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
2102 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2103 ; SSE-NEXT: movaps 48(%rdx), %xmm6
2104 ; SSE-NEXT: movaps 48(%rcx), %xmm0
2105 ; SSE-NEXT: movaps %xmm6, %xmm5
2106 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2107 ; SSE-NEXT: movaps 48(%rdi), %xmm7
2108 ; SSE-NEXT: movaps 48(%rsi), %xmm1
2109 ; SSE-NEXT: movaps %xmm7, %xmm4
2110 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2111 ; SSE-NEXT: movaps 48(%r8), %xmm2
2112 ; SSE-NEXT: movaps 48(%r9), %xmm3
2113 ; SSE-NEXT: movaps %xmm3, %xmm8
2114 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
2115 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
2116 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2117 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
2118 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2119 ; SSE-NEXT: movaps %xmm2, %xmm4
2120 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
2121 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
2122 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2123 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
2124 ; SSE-NEXT: movaps %xmm0, %xmm1
2125 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
2126 ; SSE-NEXT: movaps %xmm2, %xmm4
2127 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
2128 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
2129 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2130 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
2131 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2132 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
2133 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
2134 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
2135 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2136 ; SSE-NEXT: movaps 64(%rdx), %xmm6
2137 ; SSE-NEXT: movaps 64(%rcx), %xmm0
2138 ; SSE-NEXT: movaps %xmm6, %xmm5
2139 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2140 ; SSE-NEXT: movaps 64(%rdi), %xmm7
2141 ; SSE-NEXT: movaps 64(%rsi), %xmm1
2142 ; SSE-NEXT: movaps %xmm7, %xmm4
2143 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2144 ; SSE-NEXT: movaps 64(%r8), %xmm2
2145 ; SSE-NEXT: movaps 64(%r9), %xmm3
2146 ; SSE-NEXT: movaps %xmm3, %xmm8
2147 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
2148 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
2149 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2150 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
2151 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2152 ; SSE-NEXT: movaps %xmm2, %xmm4
2153 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
2154 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
2155 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2156 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
2157 ; SSE-NEXT: movaps %xmm0, %xmm1
2158 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
2159 ; SSE-NEXT: movaps %xmm2, %xmm4
2160 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
2161 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
2162 ; SSE-NEXT: movaps %xmm4, (%rsp) # 16-byte Spill
2163 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
2164 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2165 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
2166 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
2167 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
2168 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2169 ; SSE-NEXT: movaps 80(%rdx), %xmm6
2170 ; SSE-NEXT: movaps 80(%rcx), %xmm0
2171 ; SSE-NEXT: movaps %xmm6, %xmm5
2172 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2173 ; SSE-NEXT: movaps 80(%rdi), %xmm7
2174 ; SSE-NEXT: movaps 80(%rsi), %xmm1
2175 ; SSE-NEXT: movaps %xmm7, %xmm4
2176 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2177 ; SSE-NEXT: movaps 80(%r8), %xmm2
2178 ; SSE-NEXT: movaps 80(%r9), %xmm3
2179 ; SSE-NEXT: movaps %xmm3, %xmm8
2180 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
2181 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
2182 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2183 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
2184 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2185 ; SSE-NEXT: movaps %xmm2, %xmm4
2186 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
2187 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
2188 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2189 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
2190 ; SSE-NEXT: movaps %xmm0, %xmm1
2191 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
2192 ; SSE-NEXT: movaps %xmm2, %xmm4
2193 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
2194 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
2195 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2196 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
2197 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2198 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
2199 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
2200 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
2201 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2202 ; SSE-NEXT: movaps 96(%rdx), %xmm9
2203 ; SSE-NEXT: movaps 96(%rcx), %xmm0
2204 ; SSE-NEXT: movaps %xmm9, %xmm14
2205 ; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
2206 ; SSE-NEXT: movaps 96(%rdi), %xmm11
2207 ; SSE-NEXT: movaps 96(%rsi), %xmm1
2208 ; SSE-NEXT: movaps %xmm11, %xmm13
2209 ; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1]
2210 ; SSE-NEXT: movaps 96(%r8), %xmm2
2211 ; SSE-NEXT: movaps 96(%r9), %xmm3
2212 ; SSE-NEXT: movaps %xmm3, %xmm15
2213 ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm2[0]
2214 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[2,0],xmm13[2,3]
2215 ; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm14[0]
2216 ; SSE-NEXT: movaps %xmm2, %xmm4
2217 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
2218 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,3],xmm4[0,2]
2219 ; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm1[2],xmm11[3],xmm1[3]
2220 ; SSE-NEXT: movaps %xmm0, %xmm1
2221 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm9[1]
2222 ; SSE-NEXT: movaps %xmm2, %xmm8
2223 ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm3[1]
2224 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm11[2,3]
2225 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm1[2,0]
2226 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
2227 ; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm0[2],xmm9[3],xmm0[3]
2228 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,3],xmm2[0,2]
2229 ; SSE-NEXT: movaps 112(%rdx), %xmm3
2230 ; SSE-NEXT: movaps 112(%rcx), %xmm12
2231 ; SSE-NEXT: movaps %xmm3, %xmm5
2232 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
2233 ; SSE-NEXT: movaps 112(%rdi), %xmm2
2234 ; SSE-NEXT: movaps 112(%rsi), %xmm10
2235 ; SSE-NEXT: movaps %xmm2, %xmm4
2236 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
2237 ; SSE-NEXT: movaps 112(%r8), %xmm1
2238 ; SSE-NEXT: movaps 112(%r9), %xmm7
2239 ; SSE-NEXT: movaps %xmm7, %xmm6
2240 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm1[0]
2241 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm4[2,3]
2242 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
2243 ; SSE-NEXT: movaps %xmm1, %xmm0
2244 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm7[1,1]
2245 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm0[0,2]
2246 ; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm10[2],xmm2[3],xmm10[3]
2247 ; SSE-NEXT: movaps %xmm12, %xmm0
2248 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
2249 ; SSE-NEXT: movaps %xmm1, %xmm10
2250 ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm7[1]
2251 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm2[2,3]
2252 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
2253 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm7[3,3]
2254 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm12[2],xmm3[3],xmm12[3]
2255 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm1[0,2]
2256 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2257 ; SSE-NEXT: movaps %xmm3, 752(%rax)
2258 ; SSE-NEXT: movaps %xmm10, 736(%rax)
2259 ; SSE-NEXT: movaps %xmm2, 720(%rax)
2260 ; SSE-NEXT: movaps %xmm5, 704(%rax)
2261 ; SSE-NEXT: movaps %xmm6, 688(%rax)
2262 ; SSE-NEXT: movaps %xmm4, 672(%rax)
2263 ; SSE-NEXT: movaps %xmm9, 656(%rax)
2264 ; SSE-NEXT: movaps %xmm8, 640(%rax)
2265 ; SSE-NEXT: movaps %xmm11, 624(%rax)
2266 ; SSE-NEXT: movaps %xmm14, 608(%rax)
2267 ; SSE-NEXT: movaps %xmm15, 592(%rax)
2268 ; SSE-NEXT: movaps %xmm13, 576(%rax)
2269 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2270 ; SSE-NEXT: movaps %xmm0, 560(%rax)
2271 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2272 ; SSE-NEXT: movaps %xmm0, 544(%rax)
2273 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2274 ; SSE-NEXT: movaps %xmm0, 528(%rax)
2275 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2276 ; SSE-NEXT: movaps %xmm0, 512(%rax)
2277 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2278 ; SSE-NEXT: movaps %xmm0, 496(%rax)
2279 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2280 ; SSE-NEXT: movaps %xmm0, 480(%rax)
2281 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2282 ; SSE-NEXT: movaps %xmm0, 464(%rax)
2283 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
2284 ; SSE-NEXT: movaps %xmm0, 448(%rax)
2285 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2286 ; SSE-NEXT: movaps %xmm0, 432(%rax)
2287 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2288 ; SSE-NEXT: movaps %xmm0, 416(%rax)
2289 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2290 ; SSE-NEXT: movaps %xmm0, 400(%rax)
2291 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2292 ; SSE-NEXT: movaps %xmm0, 384(%rax)
2293 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2294 ; SSE-NEXT: movaps %xmm0, 368(%rax)
2295 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2296 ; SSE-NEXT: movaps %xmm0, 352(%rax)
2297 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2298 ; SSE-NEXT: movaps %xmm0, 336(%rax)
2299 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2300 ; SSE-NEXT: movaps %xmm0, 320(%rax)
2301 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2302 ; SSE-NEXT: movaps %xmm0, 304(%rax)
2303 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2304 ; SSE-NEXT: movaps %xmm0, 288(%rax)
2305 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2306 ; SSE-NEXT: movaps %xmm0, 272(%rax)
2307 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2308 ; SSE-NEXT: movaps %xmm0, 256(%rax)
2309 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2310 ; SSE-NEXT: movaps %xmm0, 240(%rax)
2311 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2312 ; SSE-NEXT: movaps %xmm0, 224(%rax)
2313 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2314 ; SSE-NEXT: movaps %xmm0, 208(%rax)
2315 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2316 ; SSE-NEXT: movaps %xmm0, 192(%rax)
2317 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2318 ; SSE-NEXT: movaps %xmm0, 176(%rax)
2319 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2320 ; SSE-NEXT: movaps %xmm0, 160(%rax)
2321 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2322 ; SSE-NEXT: movaps %xmm0, 144(%rax)
2323 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2324 ; SSE-NEXT: movaps %xmm0, 128(%rax)
2325 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2326 ; SSE-NEXT: movaps %xmm0, 112(%rax)
2327 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2328 ; SSE-NEXT: movaps %xmm0, 96(%rax)
2329 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2330 ; SSE-NEXT: movaps %xmm0, 80(%rax)
2331 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2332 ; SSE-NEXT: movaps %xmm0, 64(%rax)
2333 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2334 ; SSE-NEXT: movaps %xmm0, 48(%rax)
2335 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2336 ; SSE-NEXT: movaps %xmm0, 32(%rax)
2337 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2338 ; SSE-NEXT: movaps %xmm0, 16(%rax)
2339 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2340 ; SSE-NEXT: movaps %xmm0, (%rax)
2341 ; SSE-NEXT: addq $456, %rsp # imm = 0x1C8
2342 ; SSE-NEXT: retq
2344 ; AVX1-ONLY-LABEL: store_i32_stride6_vf32:
2345 ; AVX1-ONLY: # %bb.0:
2346 ; AVX1-ONLY-NEXT: subq $1032, %rsp # imm = 0x408
2347 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm12
2348 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm8
2349 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2350 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm4
2351 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2352 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm6
2353 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2354 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm5
2355 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2356 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm1
2357 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2358 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %xmm2
2359 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2360 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm0
2361 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2362 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %xmm3
2363 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2364 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
2365 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
2366 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2367 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm1
2368 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2369 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm7
2370 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2371 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
2372 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2373 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2374 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
2375 ; AVX1-ONLY-NEXT: vbroadcastss 4(%r8), %xmm1
2376 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
2377 ; AVX1-ONLY-NEXT: vbroadcastss 4(%r9), %ymm1
2378 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
2379 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2380 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm12[0],ymm8[0],ymm12[1],ymm8[1],ymm12[4],ymm8[4],ymm12[5],ymm8[5]
2381 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2382 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
2383 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
2384 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
2385 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
2386 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5],ymm0[6,7]
2387 ; AVX1-ONLY-NEXT: vbroadcastss 16(%r9), %ymm1
2388 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2389 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2390 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,2],xmm2[1,2]
2391 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
2392 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2393 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm1
2394 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2395 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm2
2396 ; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill
2397 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
2398 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2399 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2400 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
2401 ; AVX1-ONLY-NEXT: vbroadcastss 36(%r8), %xmm1
2402 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
2403 ; AVX1-ONLY-NEXT: vbroadcastss 36(%r9), %ymm1
2404 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
2405 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2406 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
2407 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2408 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %ymm0
2409 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2410 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
2411 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2412 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %ymm8
2413 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %ymm13
2414 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm8[0],ymm13[2],ymm8[2]
2415 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2416 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
2417 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
2418 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
2419 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %ymm1
2420 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2421 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
2422 ; AVX1-ONLY-NEXT: vbroadcastss 48(%r9), %ymm1
2423 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2424 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2425 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %xmm1
2426 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2427 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %xmm0
2428 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2429 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
2430 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
2431 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2432 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %xmm1
2433 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2434 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm2
2435 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2436 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
2437 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2438 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2439 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
2440 ; AVX1-ONLY-NEXT: vbroadcastss 68(%r8), %xmm1
2441 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
2442 ; AVX1-ONLY-NEXT: vbroadcastss 68(%r9), %ymm1
2443 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
2444 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2445 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm6
2446 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %ymm14
2447 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm6[0],ymm14[0],ymm6[1],ymm14[1],ymm6[4],ymm14[4],ymm6[5],ymm14[5]
2448 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2449 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %ymm2
2450 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2451 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %ymm1
2452 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2453 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
2454 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
2455 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
2456 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
2457 ; AVX1-ONLY-NEXT: vmovaps 64(%r8), %ymm1
2458 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2459 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
2460 ; AVX1-ONLY-NEXT: vbroadcastss 80(%r9), %ymm1
2461 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2462 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2463 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %xmm9
2464 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %xmm11
2465 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm11[1,2],xmm9[1,2]
2466 ; AVX1-ONLY-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2467 ; AVX1-ONLY-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2468 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
2469 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
2470 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %xmm5
2471 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm4
2472 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
2473 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2474 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm3
2475 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
2476 ; AVX1-ONLY-NEXT: vbroadcastss 100(%r8), %xmm3
2477 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
2478 ; AVX1-ONLY-NEXT: vbroadcastss 100(%r9), %ymm3
2479 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6,7]
2480 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2481 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm7
2482 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %ymm3
2483 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm10 = ymm7[0],ymm3[0],ymm7[1],ymm3[1],ymm7[4],ymm3[4],ymm7[5],ymm3[5]
2484 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm10[2,3,2,3]
2485 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %ymm10
2486 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %ymm2
2487 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm2[0],ymm10[0],ymm2[2],ymm10[2]
2488 ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2489 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
2490 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm15, %xmm15
2491 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm15[2,3],ymm1[4,5,6,7]
2492 ; AVX1-ONLY-NEXT: vmovaps 96(%r8), %ymm1
2493 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2494 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
2495 ; AVX1-ONLY-NEXT: vbroadcastss 112(%r9), %ymm15
2496 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm15[5],ymm0[6,7]
2497 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2498 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm1 # 32-byte Folded Reload
2499 ; AVX1-ONLY-NEXT: # ymm1 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
2500 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2501 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
2502 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2503 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,2],ymm12[1,2],ymm0[5,6],ymm12[5,6]
2504 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2505 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
2506 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
2507 ; AVX1-ONLY-NEXT: vbroadcastss 20(%r8), %xmm15
2508 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
2509 ; AVX1-ONLY-NEXT: vbroadcastss 20(%r9), %ymm15
2510 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3],ymm0[4,5,6,7]
2511 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2512 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2513 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
2514 ; AVX1-ONLY-NEXT: # ymm15 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
2515 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm8[1,2],ymm13[1,2],ymm8[5,6],ymm13[5,6]
2516 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2517 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
2518 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5],ymm0[6,7]
2519 ; AVX1-ONLY-NEXT: vbroadcastss 52(%r8), %xmm8
2520 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3],ymm0[4,5,6,7]
2521 ; AVX1-ONLY-NEXT: vbroadcastss 52(%r9), %ymm8
2522 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3],ymm0[4,5,6,7]
2523 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2524 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm14[2],ymm6[3],ymm14[3],ymm6[6],ymm14[6],ymm6[7],ymm14[7]
2525 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2526 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2527 ; AVX1-ONLY-NEXT: vshufps $153, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
2528 ; AVX1-ONLY-NEXT: # ymm0 = ymm14[1,2],mem[1,2],ymm14[5,6],mem[5,6]
2529 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2530 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
2531 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
2532 ; AVX1-ONLY-NEXT: vbroadcastss 84(%r8), %xmm6
2533 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5,6,7]
2534 ; AVX1-ONLY-NEXT: vbroadcastss 84(%r9), %ymm6
2535 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3],ymm0[4,5,6,7]
2536 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2537 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
2538 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm9[0,0,0,0]
2539 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm11[0,0,0,0]
2540 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2541 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm4
2542 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7]
2543 ; AVX1-ONLY-NEXT: vinsertf128 $1, 96(%r8), %ymm0, %ymm0
2544 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
2545 ; AVX1-ONLY-NEXT: vbroadcastss 96(%r9), %ymm1
2546 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2547 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2548 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm11 = ymm7[2],ymm3[2],ymm7[3],ymm3[3],ymm7[6],ymm3[6],ymm7[7],ymm3[7]
2549 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm10[1,2],ymm2[1,2],ymm10[5,6],ymm2[5,6]
2550 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2551 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
2552 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
2553 ; AVX1-ONLY-NEXT: vbroadcastss 116(%r8), %xmm1
2554 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
2555 ; AVX1-ONLY-NEXT: vbroadcastss 116(%r9), %ymm1
2556 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
2557 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2558 ; AVX1-ONLY-NEXT: vbroadcastss (%rcx), %xmm0
2559 ; AVX1-ONLY-NEXT: vbroadcastss (%rdx), %xmm1
2560 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2561 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2562 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
2563 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
2564 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3
2565 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
2566 ; AVX1-ONLY-NEXT: vinsertf128 $1, (%r8), %ymm1, %ymm1
2567 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
2568 ; AVX1-ONLY-NEXT: vbroadcastss (%r9), %ymm1
2569 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2570 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2571 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
2572 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
2573 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
2574 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2575 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
2576 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm3 = mem[2,1,3,3]
2577 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
2578 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5],ymm3[6,7]
2579 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm3
2580 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm3[0,2,2,3]
2581 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
2582 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3,4,5,6],ymm3[7]
2583 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm3 # 32-byte Folded Reload
2584 ; AVX1-ONLY-NEXT: # ymm3 = ymm12[3,0],mem[3,0],ymm12[7,4],mem[7,4]
2585 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
2586 ; AVX1-ONLY-NEXT: vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
2587 ; AVX1-ONLY-NEXT: # ymm3 = mem[2,3],ymm3[2,3]
2588 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
2589 ; AVX1-ONLY-NEXT: # ymm4 = mem[2,3,2,3]
2590 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,1,3,3,6,5,7,7]
2591 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3,4,5],ymm4[6,7]
2592 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = mem[2,3,2,3]
2593 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2,2,3,4,6,6,7]
2594 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4,5,6],ymm4[7]
2595 ; AVX1-ONLY-NEXT: vbroadcastss 32(%rcx), %xmm4
2596 ; AVX1-ONLY-NEXT: vbroadcastss 32(%rdx), %xmm6
2597 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
2598 ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm5 # 16-byte Reload
2599 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm6 # 16-byte Folded Reload
2600 ; AVX1-ONLY-NEXT: # xmm6 = xmm5[0],mem[0],xmm5[1],mem[1]
2601 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm7
2602 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1],ymm4[2,3],ymm7[4,5,6,7]
2603 ; AVX1-ONLY-NEXT: vinsertf128 $1, 32(%r8), %ymm6, %ymm6
2604 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
2605 ; AVX1-ONLY-NEXT: vbroadcastss 32(%r9), %ymm6
2606 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm6[5],ymm4[6,7]
2607 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2608 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
2609 ; AVX1-ONLY-NEXT: # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
2610 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm6[2,3,2,3]
2611 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
2612 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6
2613 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm7 = mem[2,1,3,3]
2614 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm7, %ymm7
2615 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3,4,5],ymm7[6,7]
2616 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %xmm7
2617 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm8 = xmm7[0,2,2,3]
2618 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm7
2619 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm6[0],ymm7[1],ymm6[2,3,4,5,6],ymm7[7]
2620 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm6 # 32-byte Folded Reload
2621 ; AVX1-ONLY-NEXT: # ymm6 = ymm13[3,0],mem[3,0],ymm13[7,4],mem[7,4]
2622 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7]
2623 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm15[2,3],ymm6[2,3]
2624 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
2625 ; AVX1-ONLY-NEXT: # ymm8 = mem[2,3,2,3]
2626 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,1,3,3,6,5,7,7]
2627 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3,4,5],ymm8[6,7]
2628 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm8 = mem[2,3,2,3]
2629 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7]
2630 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm8[1],ymm6[2,3,4,5,6],ymm8[7]
2631 ; AVX1-ONLY-NEXT: vbroadcastss 64(%rcx), %xmm8
2632 ; AVX1-ONLY-NEXT: vbroadcastss 64(%rdx), %xmm9
2633 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
2634 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2635 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm9 # 16-byte Folded Reload
2636 ; AVX1-ONLY-NEXT: # xmm9 = xmm5[0],mem[0],xmm5[1],mem[1]
2637 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm12
2638 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm12[0,1],ymm8[2,3],ymm12[4,5,6,7]
2639 ; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%r8), %ymm9, %ymm9
2640 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
2641 ; AVX1-ONLY-NEXT: vbroadcastss 64(%r9), %ymm9
2642 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
2643 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2644 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
2645 ; AVX1-ONLY-NEXT: # xmm9 = xmm9[2],mem[2],xmm9[3],mem[3]
2646 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
2647 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
2648 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm12, %ymm9
2649 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm12 = mem[2,1,3,3]
2650 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm12, %ymm12
2651 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm12[0,1],ymm9[2,3,4,5],ymm12[6,7]
2652 ; AVX1-ONLY-NEXT: vmovaps 64(%r9), %xmm12
2653 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm12[0,2,2,3]
2654 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm13, %ymm12
2655 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2,3,4,5,6],ymm12[7]
2656 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2657 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm5[3,0],ymm14[3,0],ymm5[7,4],ymm14[7,4]
2658 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,0,2,3,6,4,6,7]
2659 ; AVX1-ONLY-NEXT: vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm5 # 32-byte Folded Reload
2660 ; AVX1-ONLY-NEXT: # ymm5 = mem[2,3],ymm12[2,3]
2661 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
2662 ; AVX1-ONLY-NEXT: # ymm12 = mem[2,3,2,3]
2663 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,1,3,3,6,5,7,7]
2664 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm12[0,1],ymm5[2,3,4,5],ymm12[6,7]
2665 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = mem[2,3,2,3]
2666 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,2,2,3,4,6,6,7]
2667 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3,4,5,6],ymm12[7]
2668 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2669 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm12 # 16-byte Folded Reload
2670 ; AVX1-ONLY-NEXT: # xmm12 = xmm10[2],mem[2],xmm10[3],mem[3]
2671 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm12[2,3,2,3]
2672 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2673 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm10, %ymm12
2674 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm13 = mem[2,1,3,3]
2675 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm13, %ymm13
2676 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3,4,5],ymm13[6,7]
2677 ; AVX1-ONLY-NEXT: vmovaps 96(%r9), %xmm13
2678 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm14 = xmm13[0,2,2,3]
2679 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm14, %ymm13
2680 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4,5,6],ymm13[7]
2681 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload
2682 ; AVX1-ONLY-NEXT: # ymm10 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
2683 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
2684 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm11[2,3],ymm10[2,3]
2685 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
2686 ; AVX1-ONLY-NEXT: # ymm10 = mem[2,3,2,3]
2687 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,1,3,3,6,5,7,7]
2688 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm2[2,3,4,5],ymm10[6,7]
2689 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm10 = mem[2,3,2,3]
2690 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,2,2,3,4,6,6,7]
2691 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm10[1],ymm2[2,3,4,5,6],ymm10[7]
2692 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
2693 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 736(%rax)
2694 ; AVX1-ONLY-NEXT: vmovaps %ymm12, 640(%rax)
2695 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 544(%rax)
2696 ; AVX1-ONLY-NEXT: vmovaps %ymm9, 448(%rax)
2697 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 384(%rax)
2698 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 352(%rax)
2699 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 256(%rax)
2700 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rax)
2701 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 160(%rax)
2702 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rax)
2703 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
2704 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2705 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 704(%rax)
2706 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2707 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 576(%rax)
2708 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2709 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 512(%rax)
2710 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2711 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 320(%rax)
2712 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2713 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
2714 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2715 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 672(%rax)
2716 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2717 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 608(%rax)
2718 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2719 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rax)
2720 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2721 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rax)
2722 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2723 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rax)
2724 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2725 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
2726 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2727 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax)
2728 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2729 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
2730 ; AVX1-ONLY-NEXT: addq $1032, %rsp # imm = 0x408
2731 ; AVX1-ONLY-NEXT: vzeroupper
2732 ; AVX1-ONLY-NEXT: retq

2734 ; AVX2-SLOW-LABEL: store_i32_stride6_vf32:
2735 ; AVX2-SLOW: # %bb.0:
2736 ; AVX2-SLOW-NEXT: subq $904, %rsp # imm = 0x388
2737 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm0
2738 ; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %xmm2
2739 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm1
2740 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm3
2741 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2742 ; AVX2-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2743 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
2744 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm5
2745 ; AVX2-SLOW-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2746 ; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %xmm8
2747 ; AVX2-SLOW-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2748 ; AVX2-SLOW-NEXT: vmovdqa 64(%rcx), %xmm7
2749 ; AVX2-SLOW-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2750 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,3]
2751 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm6
2752 ; AVX2-SLOW-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2753 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %xmm9
2754 ; AVX2-SLOW-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2755 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,2,2,3]
2756 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
2757 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
2758 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5],ymm5[6,7]
2759 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm10
2760 ; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %xmm6
2761 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm10[0],zero,xmm10[1],zero
2762 ; AVX2-SLOW-NEXT: vmovdqa %xmm10, %xmm11
2763 ; AVX2-SLOW-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2764 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
2765 ; AVX2-SLOW-NEXT: vpbroadcastd 4(%r9), %ymm5
2766 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
2767 ; AVX2-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2768 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm10 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
2769 ; AVX2-SLOW-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2770 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm8[1,2,2,3]
2771 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm9[1,2,2,3]
2772 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
2773 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm5
2774 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
2775 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
2776 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero
2777 ; AVX2-SLOW-NEXT: vmovdqa %xmm6, %xmm12
2778 ; AVX2-SLOW-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2779 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
2780 ; AVX2-SLOW-NEXT: vpbroadcastd 36(%r9), %ymm5
2781 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
2782 ; AVX2-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2783 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdx), %xmm5
2784 ; AVX2-SLOW-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2785 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[1,2,2,3]
2786 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,3]
2787 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
2788 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm4[0,1,2,1]
2789 ; AVX2-SLOW-NEXT: vmovdqa 64(%rsi), %xmm4
2790 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %xmm5
2791 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
2792 ; AVX2-SLOW-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2793 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
2794 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
2795 ; AVX2-SLOW-NEXT: vmovdqa 64(%r8), %xmm8
2796 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm8[0],zero,xmm8[1],zero
2797 ; AVX2-SLOW-NEXT: vmovdqa %xmm8, %xmm15
2798 ; AVX2-SLOW-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2799 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
2800 ; AVX2-SLOW-NEXT: vpbroadcastd 68(%r9), %ymm7
2801 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3],ymm6[4,5,6,7]
2802 ; AVX2-SLOW-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2803 ; AVX2-SLOW-NEXT: vmovdqa 96(%rcx), %xmm6
2804 ; AVX2-SLOW-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2805 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,2,2,3]
2806 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdx), %xmm7
2807 ; AVX2-SLOW-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2808 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,2,2,3]
2809 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
2810 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[0,1,2,1]
2811 ; AVX2-SLOW-NEXT: vmovdqa 96(%rsi), %xmm14
2812 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %xmm7
2813 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm7[2],xmm14[2],xmm7[3],xmm14[3]
2814 ; AVX2-SLOW-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2815 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm9
2816 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
2817 ; AVX2-SLOW-NEXT: vmovdqa 96(%r8), %xmm6
2818 ; AVX2-SLOW-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2819 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm6[0],zero,xmm6[1],zero
2820 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
2821 ; AVX2-SLOW-NEXT: vpbroadcastd 100(%r9), %ymm9
2822 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1,2],ymm9[3],ymm8[4,5,6,7]
2823 ; AVX2-SLOW-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2824 ; AVX2-SLOW-NEXT: vpbroadcastd (%rcx), %xmm8
2825 ; AVX2-SLOW-NEXT: vpbroadcastd (%rdx), %xmm9
2826 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
2827 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2828 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
2829 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3],ymm0[4,5,6,7]
2830 ; AVX2-SLOW-NEXT: vpbroadcastq %xmm11, %ymm1
2831 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
2832 ; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm1
2833 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2834 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm1, %ymm1
2835 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
2836 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2837 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm0
2838 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2839 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm6
2840 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm8 = ymm6[0,1,2,2,4,5,6,6]
2841 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[1,1,2,3,5,5,6,7]
2842 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2],ymm8[3],ymm9[4],ymm8[5],ymm9[6],ymm8[7]
2843 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm8[2,1,2,3]
2844 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm9
2845 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm8
2846 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[6],ymm8[6],ymm9[7],ymm8[7]
2847 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2848 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm0[4,5],ymm10[6,7]
2849 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm11 = mem[0],zero,mem[1],zero
2850 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
2851 ; AVX2-SLOW-NEXT: vpbroadcastd 20(%r9), %ymm11
2852 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6,7]
2853 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2854 ; AVX2-SLOW-NEXT: vpbroadcastd 32(%rcx), %xmm10
2855 ; AVX2-SLOW-NEXT: vpbroadcastd 32(%rdx), %xmm11
2856 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
2857 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
2858 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
2859 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm10[2,3],ymm2[4,5,6,7]
2860 ; AVX2-SLOW-NEXT: vpbroadcastq %xmm12, %ymm3
2861 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
2862 ; AVX2-SLOW-NEXT: vmovdqa 32(%r9), %xmm0
2863 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2864 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %ymm3
2865 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
2866 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2867 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %ymm3
2868 ; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %ymm2
2869 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm10 = ymm2[0,1,2,2,4,5,6,6]
2870 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm11 = ymm3[1,1,2,3,5,5,6,7]
2871 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2],ymm10[3],ymm11[4],ymm10[5],ymm11[6],ymm10[7]
2872 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm12 = ymm10[2,1,2,3]
2873 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm11
2874 ; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %ymm10
2875 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[6],ymm10[6],ymm11[7],ymm10[7]
2876 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2877 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm0[4,5],ymm12[6,7]
2878 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm13 = mem[0],zero,mem[1],zero
2879 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5,6,7]
2880 ; AVX2-SLOW-NEXT: vpbroadcastd 52(%r9), %ymm13
2881 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm13[3],ymm12[4,5,6,7]
2882 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2883 ; AVX2-SLOW-NEXT: vpbroadcastd 64(%rcx), %xmm12
2884 ; AVX2-SLOW-NEXT: vpbroadcastd 64(%rdx), %xmm13
2885 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
2886 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
2887 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
2888 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm12[2,3],ymm4[4,5,6,7]
2889 ; AVX2-SLOW-NEXT: vpbroadcastq %xmm15, %ymm5
2890 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
2891 ; AVX2-SLOW-NEXT: vmovdqa 64(%r9), %xmm0
2892 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
2893 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %ymm5
2894 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm5[5],ymm4[6,7]
2895 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2896 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdx), %ymm5
2897 ; AVX2-SLOW-NEXT: vmovdqa 64(%rcx), %ymm4
2898 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm12 = ymm4[0,1,2,2,4,5,6,6]
2899 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm13 = ymm5[1,1,2,3,5,5,6,7]
2900 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0],ymm12[1],ymm13[2],ymm12[3],ymm13[4],ymm12[5],ymm13[6],ymm12[7]
2901 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm12[2,1,2,3]
2902 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm13
2903 ; AVX2-SLOW-NEXT: vmovdqa 64(%rsi), %ymm12
2904 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm15 = ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[6],ymm12[6],ymm13[7],ymm12[7]
2905 ; AVX2-SLOW-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2906 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5],ymm0[6,7]
2907 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
2908 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
2909 ; AVX2-SLOW-NEXT: vpbroadcastd 84(%r9), %ymm15
2910 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3],ymm0[4,5,6,7]
2911 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2912 ; AVX2-SLOW-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2913 ; AVX2-SLOW-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
2914 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
2915 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1]
2916 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,2,1]
2917 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3],ymm7[4,5,6,7]
2918 ; AVX2-SLOW-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
2919 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5],ymm0[6,7]
2920 ; AVX2-SLOW-NEXT: vmovdqa 96(%r9), %xmm7
2921 ; AVX2-SLOW-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2922 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm7, %ymm7
2923 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm7[5],ymm0[6,7]
2924 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2925 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdx), %ymm0
2926 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2927 ; AVX2-SLOW-NEXT: vmovdqa 96(%rcx), %ymm7
2928 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm14 = ymm7[0,1,2,2,4,5,6,6]
2929 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm15 = ymm0[1,1,2,3,5,5,6,7]
2930 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
2931 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
2932 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm1
2933 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2934 ; AVX2-SLOW-NEXT: vmovdqa 96(%rsi), %ymm0
2935 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2936 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm15 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
2937 ; AVX2-SLOW-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2938 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
2939 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
2940 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
2941 ; AVX2-SLOW-NEXT: vpbroadcastd 116(%r9), %ymm15
2942 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
2943 ; AVX2-SLOW-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2944 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2945 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload
2946 ; AVX2-SLOW-NEXT: # xmm14 = xmm14[2],mem[2],xmm14[3],mem[3]
2947 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm14 = xmm14[2,3,2,3]
2948 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2949 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm14, %ymm15, %ymm14
2950 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
2951 ; AVX2-SLOW-NEXT: # xmm15 = mem[2,2,3,3]
2952 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
2953 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5],ymm15[6,7]
2954 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
2955 ; AVX2-SLOW-NEXT: # xmm15 = mem[2,2,3,3]
2956 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
2957 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm15 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6],ymm15[7]
2958 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[4],ymm8[4],ymm9[5],ymm8[5]
2959 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2960 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm9 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[4],ymm6[4],ymm0[5],ymm6[5]
2961 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
2962 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
2963 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
2964 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm9
2965 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
2966 ; AVX2-SLOW-NEXT: vpbroadcastd 16(%r9), %ymm14
2967 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm14[5],ymm8[6,7]
2968 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[6],ymm6[6],ymm0[7],ymm6[7]
2969 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
2970 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
2971 ; AVX2-SLOW-NEXT: # ymm1 = mem[2,3],ymm1[2,3]
2972 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = ymm9[2,1,3,3,6,5,7,7]
2973 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
2974 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3,4,5],ymm6[6,7]
2975 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = mem[0,2,2,3,4,6,6,7]
2976 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
2977 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0],ymm6[1],ymm1[2,3,4,5,6],ymm6[7]
2978 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2979 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
2980 ; AVX2-SLOW-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
2981 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
2982 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
2983 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm9, %ymm1
2984 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
2985 ; AVX2-SLOW-NEXT: # xmm9 = mem[2,2,3,3]
2986 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,2,1]
2987 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm1[2,3,4,5],ymm9[6,7]
2988 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
2989 ; AVX2-SLOW-NEXT: # xmm9 = mem[2,2,3,3]
2990 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,2,1]
2991 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4,5,6],ymm9[7]
2992 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm9 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[4],ymm10[4],ymm11[5],ymm10[5]
2993 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm10 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
2994 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
2995 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
2996 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3],ymm9[4,5,6,7]
2997 ; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %ymm10
2998 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5],ymm9[6,7]
2999 ; AVX2-SLOW-NEXT: vpbroadcastd 48(%r9), %ymm11
3000 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm11[5],ymm9[6,7]
3001 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
3002 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,2,3,6,7,6,7]
3003 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
3004 ; AVX2-SLOW-NEXT: # ymm2 = mem[2,3],ymm2[2,3]
3005 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm10[2,1,3,3,6,5,7,7]
3006 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
3007 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5],ymm3[6,7]
3008 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = mem[0,2,2,3,4,6,6,7]
3009 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
3010 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6],ymm3[7]
3011 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3012 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
3013 ; AVX2-SLOW-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
3014 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,3,2,3]
3015 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3016 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm10, %ymm3
3017 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
3018 ; AVX2-SLOW-NEXT: # xmm10 = mem[2,2,3,3]
3019 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,2,1]
3020 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm10[0,1],ymm3[2,3,4,5],ymm10[6,7]
3021 ; AVX2-SLOW-NEXT: vpermilps $250, (%rsp), %xmm10 # 16-byte Folded Reload
3022 ; AVX2-SLOW-NEXT: # xmm10 = mem[2,2,3,3]
3023 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,2,1]
3024 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm10[1],ymm3[2,3,4,5,6],ymm10[7]
3025 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm10 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[4],ymm12[4],ymm13[5],ymm12[5]
3026 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm11 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[4],ymm4[4],ymm5[5],ymm4[5]
3027 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
3028 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
3029 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
3030 ; AVX2-SLOW-NEXT: vmovdqa 64(%r8), %ymm11
3031 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
3032 ; AVX2-SLOW-NEXT: vpbroadcastd 80(%r9), %ymm12
3033 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5],ymm10[6,7]
3034 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm4 = ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[6],ymm4[6],ymm5[7],ymm4[7]
3035 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
3036 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
3037 ; AVX2-SLOW-NEXT: # ymm4 = mem[2,3],ymm4[2,3]
3038 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm5 = ymm11[2,1,3,3,6,5,7,7]
3039 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
3040 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3,4,5],ymm5[6,7]
3041 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm5 = mem[0,2,2,3,4,6,6,7]
3042 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
3043 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4,5,6],ymm5[7]
3044 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3045 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
3046 ; AVX2-SLOW-NEXT: # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3]
3047 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm5 = xmm5[2,3,2,3]
3048 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
3049 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm5, %ymm11, %ymm5
3050 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
3051 ; AVX2-SLOW-NEXT: # xmm11 = mem[2,2,3,3]
3052 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
3053 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm11[0,1],ymm5[2,3,4,5],ymm11[6,7]
3054 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
3055 ; AVX2-SLOW-NEXT: # xmm11 = mem[2,2,3,3]
3056 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
3057 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm11[1],ymm5[2,3,4,5,6],ymm11[7]
3058 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3059 ; AVX2-SLOW-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
3060 ; AVX2-SLOW-NEXT: # ymm11 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
3061 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3062 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm12 = ymm0[0],ymm7[0],ymm0[1],ymm7[1],ymm0[4],ymm7[4],ymm0[5],ymm7[5]
3063 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
3064 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,2,3]
3065 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
3066 ; AVX2-SLOW-NEXT: vmovdqa 96(%r8), %ymm12
3067 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
3068 ; AVX2-SLOW-NEXT: vpbroadcastd 112(%r9), %ymm13
3069 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm13[5],ymm11[6,7]
3070 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm7[2],ymm0[3],ymm7[3],ymm0[6],ymm7[6],ymm0[7],ymm7[7]
3071 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
3072 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3073 ; AVX2-SLOW-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
3074 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm7 = ymm12[2,1,3,3,6,5,7,7]
3075 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
3076 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3,4,5],ymm7[6,7]
3077 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm7 = mem[0,2,2,3,4,6,6,7]
3078 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
3079 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2,3,4,5,6],ymm7[7]
3080 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
3081 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 736(%rax)
3082 ; AVX2-SLOW-NEXT: vmovdqa %ymm11, 672(%rax)
3083 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 640(%rax)
3084 ; AVX2-SLOW-NEXT: vmovdqa %ymm4, 544(%rax)
3085 ; AVX2-SLOW-NEXT: vmovdqa %ymm10, 480(%rax)
3086 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 448(%rax)
3087 ; AVX2-SLOW-NEXT: vmovdqa %ymm2, 352(%rax)
3088 ; AVX2-SLOW-NEXT: vmovdqa %ymm9, 288(%rax)
3089 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 256(%rax)
3090 ; AVX2-SLOW-NEXT: vmovdqa %ymm6, 160(%rax)
3091 ; AVX2-SLOW-NEXT: vmovdqa %ymm8, 96(%rax)
3092 ; AVX2-SLOW-NEXT: vmovaps %ymm15, 64(%rax)
3093 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3094 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 704(%rax)
3095 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3096 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 576(%rax)
3097 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3098 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 512(%rax)
3099 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3100 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 384(%rax)
3101 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3102 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 320(%rax)
3103 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3104 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 192(%rax)
3105 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3106 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rax)
3107 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3108 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
3109 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3110 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 608(%rax)
3111 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3112 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 416(%rax)
3113 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3114 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax)
3115 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3116 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
3117 ; AVX2-SLOW-NEXT: addq $904, %rsp # imm = 0x388
3118 ; AVX2-SLOW-NEXT: vzeroupper
3119 ; AVX2-SLOW-NEXT: retq

3121 ; AVX2-FAST-LABEL: store_i32_stride6_vf32:
3122 ; AVX2-FAST: # %bb.0:
3123 ; AVX2-FAST-NEXT: subq $872, %rsp # imm = 0x368
3124 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm2
3125 ; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %xmm4
3126 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm7
3127 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %xmm1
3128 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm7[2],xmm2[2],xmm7[3],xmm2[3]
3129 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3130 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
3131 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %xmm3
3132 ; AVX2-FAST-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3133 ; AVX2-FAST-NEXT: vmovdqa 32(%rcx), %xmm9
3134 ; AVX2-FAST-NEXT: vmovdqa %xmm9, (%rsp) # 16-byte Spill
3135 ; AVX2-FAST-NEXT: vmovdqa 64(%rcx), %xmm6
3136 ; AVX2-FAST-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3137 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,2,2,3]
3138 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm5
3139 ; AVX2-FAST-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3140 ; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %xmm10
3141 ; AVX2-FAST-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3142 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,3]
3143 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
3144 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
3145 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5],ymm3[6,7]
3146 ; AVX2-FAST-NEXT: vmovdqa (%r8), %xmm8
3147 ; AVX2-FAST-NEXT: vmovdqa 32(%r8), %xmm13
3148 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm8[0],zero,xmm8[1],zero
3149 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
3150 ; AVX2-FAST-NEXT: vpbroadcastd 4(%r9), %ymm3
3151 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3],ymm0[4,5,6,7]
3152 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3153 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
3154 ; AVX2-FAST-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3155 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm9[1,2,2,3]
3156 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm10[1,2,2,3]
3157 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
3158 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm3
3159 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
3160 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
3161 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm13[0],zero,xmm13[1],zero
3162 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
3163 ; AVX2-FAST-NEXT: vpbroadcastd 36(%r9), %ymm3
3164 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3],ymm0[4,5,6,7]
3165 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3166 ; AVX2-FAST-NEXT: vmovdqa 64(%rdx), %xmm3
3167 ; AVX2-FAST-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3168 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm6[1,2,2,3]
3169 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,2,2,3]
3170 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
3171 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
3172 ; AVX2-FAST-NEXT: vmovdqa 64(%rsi), %xmm6
3173 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %xmm9
3174 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm9[2],xmm6[2],xmm9[3],xmm6[3]
3175 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3176 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
3177 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
3178 ; AVX2-FAST-NEXT: vmovdqa 64(%r8), %xmm14
3179 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm14[0],zero,xmm14[1],zero
3180 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
3181 ; AVX2-FAST-NEXT: vpbroadcastd 68(%r9), %ymm3
3182 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3],ymm0[4,5,6,7]
3183 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3184 ; AVX2-FAST-NEXT: vmovdqa 96(%rcx), %xmm15
3185 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm15[1,2,2,3]
3186 ; AVX2-FAST-NEXT: vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3187 ; AVX2-FAST-NEXT: vmovdqa 96(%rdx), %xmm3
3188 ; AVX2-FAST-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3189 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,2,2,3]
3190 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
3191 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm10 = ymm0[0,1,2,1]
3192 ; AVX2-FAST-NEXT: vmovdqa 96(%rsi), %xmm3
3193 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %xmm0
3194 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
3195 ; AVX2-FAST-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3196 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm11
3197 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
3198 ; AVX2-FAST-NEXT: vmovdqa 96(%r8), %xmm11
3199 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm12 = xmm11[0],zero,xmm11[1],zero
3200 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3],ymm10[4,5,6,7]
3201 ; AVX2-FAST-NEXT: vpbroadcastd 100(%r9), %ymm12
3202 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2],ymm12[3],ymm10[4,5,6,7]
3203 ; AVX2-FAST-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3204 ; AVX2-FAST-NEXT: vpbroadcastd (%rcx), %xmm10
3205 ; AVX2-FAST-NEXT: vpbroadcastd (%rdx), %xmm12
3206 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
3207 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
3208 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
3209 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm10[2,3],ymm2[4,5,6,7]
3210 ; AVX2-FAST-NEXT: vpbroadcastq %xmm8, %ymm7
3211 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm7[4,5],ymm2[6,7]
3212 ; AVX2-FAST-NEXT: vpbroadcastd (%r9), %ymm7
3213 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm7[5],ymm2[6,7]
3214 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3215 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm7
3216 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %ymm5
3217 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm8 = ymm5[0,1,2,2,4,5,6,6]
3218 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm10 = ymm7[1,1,2,3,5,5,6,7]
3219 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0],ymm8[1],ymm10[2],ymm8[3],ymm10[4],ymm8[5],ymm10[6],ymm8[7]
3220 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
3221 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm2
3222 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3223 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %ymm10
3224 ; AVX2-FAST-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3225 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm2[2],ymm10[2],ymm2[3],ymm10[3],ymm2[6],ymm10[6],ymm2[7],ymm10[7]
3226 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3227 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm2[4,5],ymm8[6,7]
3228 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm12 = mem[0],zero,mem[1],zero
3229 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3],ymm8[4,5,6,7]
3230 ; AVX2-FAST-NEXT: vpbroadcastd 20(%r9), %ymm12
3231 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2],ymm12[3],ymm8[4,5,6,7]
3232 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3233 ; AVX2-FAST-NEXT: vpbroadcastd 32(%rcx), %xmm8
3234 ; AVX2-FAST-NEXT: vpbroadcastd 32(%rdx), %xmm12
3235 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm12[0],xmm8[0],xmm12[1],xmm8[1]
3236 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
3237 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
3238 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm8[2,3],ymm1[4,5,6,7]
3239 ; AVX2-FAST-NEXT: vpbroadcastq %xmm13, %ymm4
3240 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5],ymm1[6,7]
3241 ; AVX2-FAST-NEXT: vpbroadcastd 32(%r9), %ymm4
3242 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm4[5],ymm1[6,7]
3243 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3244 ; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %ymm10
3245 ; AVX2-FAST-NEXT: vmovdqa 32(%rcx), %ymm8
3246 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm1 = ymm8[0,1,2,2,4,5,6,6]
3247 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm4 = ymm10[1,1,2,3,5,5,6,7]
3248 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2],ymm1[3],ymm4[4],ymm1[5],ymm4[6],ymm1[7]
3249 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
3250 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm2
3251 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3252 ; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %ymm4
3253 ; AVX2-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3254 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
3255 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3256 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
3257 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
3258 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3],ymm1[4,5,6,7]
3259 ; AVX2-FAST-NEXT: vpbroadcastd 52(%r9), %ymm4
3260 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3],ymm1[4,5,6,7]
3261 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3262 ; AVX2-FAST-NEXT: vpbroadcastd 64(%rcx), %xmm1
3263 ; AVX2-FAST-NEXT: vpbroadcastd 64(%rdx), %xmm4
3264 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
3265 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm9[0],xmm6[0],xmm9[1],xmm6[1]
3266 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
3267 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7]
3268 ; AVX2-FAST-NEXT: vpbroadcastq %xmm14, %ymm4
3269 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5],ymm1[6,7]
3270 ; AVX2-FAST-NEXT: vpbroadcastd 64(%r9), %ymm4
3271 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm4[5],ymm1[6,7]
3272 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3273 ; AVX2-FAST-NEXT: vmovdqa 64(%rdx), %ymm6
3274 ; AVX2-FAST-NEXT: vmovdqa 64(%rcx), %ymm4
3275 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm1 = ymm4[0,1,2,2,4,5,6,6]
3276 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm9 = ymm6[1,1,2,3,5,5,6,7]
3277 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0],ymm1[1],ymm9[2],ymm1[3],ymm9[4],ymm1[5],ymm9[6],ymm1[7]
3278 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
3279 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm2
3280 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3281 ; AVX2-FAST-NEXT: vmovdqa 64(%rsi), %ymm12
3282 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm2[2],ymm12[2],ymm2[3],ymm12[3],ymm2[6],ymm12[6],ymm2[7],ymm12[7]
3283 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3284 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
3285 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm9 = mem[0],zero,mem[1],zero
3286 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm9[2,3],ymm1[4,5,6,7]
3287 ; AVX2-FAST-NEXT: vpbroadcastd 84(%r9), %ymm9
3288 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm9[3],ymm1[4,5,6,7]
3289 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3290 ; AVX2-FAST-NEXT: vpbroadcastd %xmm15, %xmm1
3291 ; AVX2-FAST-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
3292 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
3293 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
3294 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
3295 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
3296 ; AVX2-FAST-NEXT: vpbroadcastq %xmm11, %ymm1
3297 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
3298 ; AVX2-FAST-NEXT: vpbroadcastd 96(%r9), %ymm1
3299 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
3300 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3301 ; AVX2-FAST-NEXT: vmovdqa 96(%rdx), %ymm3
3302 ; AVX2-FAST-NEXT: vmovdqa 96(%rcx), %ymm2
3303 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm2[0,1,2,2,4,5,6,6]
3304 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm9 = ymm3[1,1,2,3,5,5,6,7]
3305 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0],ymm0[1],ymm9[2],ymm0[3],ymm9[4],ymm0[5],ymm9[6],ymm0[7]
3306 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
3307 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm1
3308 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3309 ; AVX2-FAST-NEXT: vmovdqa 96(%rsi), %ymm9
3310 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],ymm9[2],ymm1[3],ymm9[3],ymm1[6],ymm9[6],ymm1[7],ymm9[7]
3311 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3312 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
3313 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm13 = mem[0],zero,mem[1],zero
3314 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3],ymm0[4,5,6,7]
3315 ; AVX2-FAST-NEXT: vpbroadcastd 116(%r9), %ymm13
3316 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm13[3],ymm0[4,5,6,7]
3317 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3318 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3319 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3320 ; AVX2-FAST-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
3321 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
3322 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3323 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm13
3324 ; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm0
3325 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [2,2,3,3,2,2,3,3]
3326 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1,0,1]
3327 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm14
3328 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3,4,5],ymm14[6,7]
3329 ; AVX2-FAST-NEXT: vmovdqa (%r9), %ymm14
3330 ; AVX2-FAST-NEXT: vpermd %ymm14, %ymm1, %ymm11
3331 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0],ymm11[1],ymm13[2,3,4,5,6],ymm11[7]
3332 ; AVX2-FAST-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3333 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
3334 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
3335 ; AVX2-FAST-NEXT: # ymm11 = ymm11[0],mem[0],ymm11[1],mem[1],ymm11[4],mem[4],ymm11[5],mem[5]
3336 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm13 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[4],ymm5[4],ymm7[5],ymm5[5]
3337 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,2]
3338 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,2,3]
3339 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1],ymm13[2,3],ymm11[4,5,6,7]
3340 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm0[4,5],ymm11[6,7]
3341 ; AVX2-FAST-NEXT: vpbroadcastd 16(%r9), %ymm13
3342 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm13[5],ymm11[6,7]
3343 ; AVX2-FAST-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3344 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[6],ymm5[6],ymm7[7],ymm5[7]
3345 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
3346 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
3347 ; AVX2-FAST-NEXT: # ymm5 = mem[2,3],ymm5[2,3]
3348 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [6,5,3,3,6,5,7,7]
3349 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm7, %ymm0
3350 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4,5],ymm0[6,7]
3351 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm15 = [4,6,2,3,4,6,6,7]
3352 ; AVX2-FAST-NEXT: vpermd %ymm14, %ymm15, %ymm5
3353 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4,5,6],ymm5[7]
3354 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3355 ; AVX2-FAST-NEXT: vpunpckhdq (%rsp), %xmm5, %xmm5 # 16-byte Folded Reload
3356 ; AVX2-FAST-NEXT: # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3]
3357 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
3358 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
3359 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm5, %ymm11, %ymm5
3360 ; AVX2-FAST-NEXT: vmovdqa 32(%r8), %ymm11
3361 ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm1, %ymm13
3362 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm13[0,1],ymm5[2,3,4,5],ymm13[6,7]
3363 ; AVX2-FAST-NEXT: vmovdqa 32(%r9), %ymm14
3364 ; AVX2-FAST-NEXT: vpermd %ymm14, %ymm1, %ymm13
3365 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm13[1],ymm5[2,3,4,5,6],ymm13[7]
3366 ; AVX2-FAST-NEXT: vmovdqu %ymm5, (%rsp) # 32-byte Spill
3367 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3368 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
3369 ; AVX2-FAST-NEXT: # ymm5 = ymm5[0],mem[0],ymm5[1],mem[1],ymm5[4],mem[4],ymm5[5],mem[5]
3370 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm13 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[4],ymm8[4],ymm10[5],ymm8[5]
3371 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,2]
3372 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
3373 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm13[2,3],ymm5[4,5,6,7]
3374 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm11[4,5],ymm5[6,7]
3375 ; AVX2-FAST-NEXT: vpbroadcastd 48(%r9), %ymm13
3376 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm13 = ymm5[0,1,2,3,4],ymm13[5],ymm5[6,7]
3377 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[6],ymm8[6],ymm10[7],ymm8[7]
3378 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
3379 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
3380 ; AVX2-FAST-NEXT: # ymm5 = mem[2,3],ymm5[2,3]
3381 ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm7, %ymm8
3382 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1],ymm5[2,3,4,5],ymm8[6,7]
3383 ; AVX2-FAST-NEXT: vpermd %ymm14, %ymm15, %ymm8
3384 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0],ymm8[1],ymm5[2,3,4,5,6],ymm8[7]
3385 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
3386 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
3387 ; AVX2-FAST-NEXT: # xmm8 = xmm8[2],mem[2],xmm8[3],mem[3]
3388 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
3389 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3390 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm8, %ymm10, %ymm8
3391 ; AVX2-FAST-NEXT: vmovdqa 64(%r8), %ymm10
3392 ; AVX2-FAST-NEXT: vpermd %ymm10, %ymm1, %ymm11
3393 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm11[0,1],ymm8[2,3,4,5],ymm11[6,7]
3394 ; AVX2-FAST-NEXT: vmovdqa 64(%r9), %ymm11
3395 ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm1, %ymm14
3396 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0],ymm14[1],ymm8[2,3,4,5,6],ymm14[7]
3397 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
3398 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm12 = ymm14[0],ymm12[0],ymm14[1],ymm12[1],ymm14[4],ymm12[4],ymm14[5],ymm12[5]
3399 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm14 = ymm6[0],ymm4[0],ymm6[1],ymm4[1],ymm6[4],ymm4[4],ymm6[5],ymm4[5]
3400 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
3401 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,1,2,3]
3402 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm14[2,3],ymm12[4,5,6,7]
3403 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm10[4,5],ymm12[6,7]
3404 ; AVX2-FAST-NEXT: vpbroadcastd 80(%r9), %ymm14
3405 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4],ymm14[5],ymm12[6,7]
3406 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm4 = ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[6],ymm4[6],ymm6[7],ymm4[7]
3407 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
3408 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
3409 ; AVX2-FAST-NEXT: # ymm4 = mem[2,3],ymm4[2,3]
3410 ; AVX2-FAST-NEXT: vpermd %ymm10, %ymm7, %ymm6
3411 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3,4,5],ymm6[6,7]
3412 ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm15, %ymm6
3413 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3,4,5,6],ymm6[7]
3414 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3415 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
3416 ; AVX2-FAST-NEXT: # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
3417 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
3418 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3419 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm6, %ymm10, %ymm6
3420 ; AVX2-FAST-NEXT: vmovdqa 96(%r8), %ymm10
3421 ; AVX2-FAST-NEXT: vpermd %ymm10, %ymm1, %ymm11
3422 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm11[0,1],ymm6[2,3,4,5],ymm11[6,7]
3423 ; AVX2-FAST-NEXT: vmovdqa 96(%r9), %ymm11
3424 ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm1, %ymm1
3425 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0],ymm1[1],ymm6[2,3,4,5,6],ymm1[7]
3426 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3427 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
3428 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm9 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
3429 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
3430 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
3431 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm9[2,3],ymm6[4,5,6,7]
3432 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm10[4,5],ymm6[6,7]
3433 ; AVX2-FAST-NEXT: vpbroadcastd 112(%r9), %ymm9
3434 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm9[5],ymm6[6,7]
3435 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
3436 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,2,3,6,7,6,7]
3437 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
3438 ; AVX2-FAST-NEXT: # ymm2 = mem[2,3],ymm2[2,3]
3439 ; AVX2-FAST-NEXT: vpermd %ymm10, %ymm7, %ymm3
3440 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5],ymm3[6,7]
3441 ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm15, %ymm3
3442 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6],ymm3[7]
3443 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
3444 ; AVX2-FAST-NEXT: vmovdqa %ymm2, 736(%rax)
3445 ; AVX2-FAST-NEXT: vmovdqa %ymm6, 672(%rax)
3446 ; AVX2-FAST-NEXT: vmovdqa %ymm1, 640(%rax)
3447 ; AVX2-FAST-NEXT: vmovdqa %ymm4, 544(%rax)
3448 ; AVX2-FAST-NEXT: vmovdqa %ymm12, 480(%rax)
3449 ; AVX2-FAST-NEXT: vmovdqa %ymm8, 448(%rax)
3450 ; AVX2-FAST-NEXT: vmovdqa %ymm5, 352(%rax)
3451 ; AVX2-FAST-NEXT: vmovdqa %ymm13, 288(%rax)
3452 ; AVX2-FAST-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
3453 ; AVX2-FAST-NEXT: vmovaps %ymm1, 256(%rax)
3454 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 160(%rax)
3455 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3456 ; AVX2-FAST-NEXT: vmovaps %ymm0, 96(%rax)
3457 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3458 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
3459 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3460 ; AVX2-FAST-NEXT: vmovaps %ymm0, 704(%rax)
3461 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3462 ; AVX2-FAST-NEXT: vmovaps %ymm0, 576(%rax)
3463 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3464 ; AVX2-FAST-NEXT: vmovaps %ymm0, 512(%rax)
3465 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3466 ; AVX2-FAST-NEXT: vmovaps %ymm0, 384(%rax)
3467 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3468 ; AVX2-FAST-NEXT: vmovaps %ymm0, 320(%rax)
3469 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3470 ; AVX2-FAST-NEXT: vmovaps %ymm0, 192(%rax)
3471 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3472 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rax)
3473 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3474 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
3475 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3476 ; AVX2-FAST-NEXT: vmovaps %ymm0, 608(%rax)
3477 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3478 ; AVX2-FAST-NEXT: vmovaps %ymm0, 416(%rax)
3479 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3480 ; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rax)
3481 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3482 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
3483 ; AVX2-FAST-NEXT: addq $872, %rsp # imm = 0x368
3484 ; AVX2-FAST-NEXT: vzeroupper
3485 ; AVX2-FAST-NEXT: retq
3487 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride6_vf32:
3488 ; AVX2-FAST-PERLANE: # %bb.0:
3489 ; AVX2-FAST-PERLANE-NEXT: subq $904, %rsp # imm = 0x388
3490 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm0
3491 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %xmm2
3492 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1
3493 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %xmm3
3494 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
3495 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3496 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3497 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm5
3498 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3499 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %xmm8
3500 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3501 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rcx), %xmm7
3502 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3503 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,3]
3504 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm6
3505 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3506 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %xmm9
3507 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3508 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,2,2,3]
3509 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
3510 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
3511 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5],ymm5[6,7]
3512 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm10
3513 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %xmm6
3514 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm10[0],zero,xmm10[1],zero
3515 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm10, %xmm11
3516 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3517 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
3518 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 4(%r9), %ymm5
3519 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
3520 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3521 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm10 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
3522 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3523 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm8[1,2,2,3]
3524 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm9[1,2,2,3]
3525 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
3526 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm5
3527 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
3528 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
3529 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero
3530 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm6, %xmm12
3531 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3532 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
3533 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 36(%r9), %ymm5
3534 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
3535 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3536 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdx), %xmm5
3537 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3538 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[1,2,2,3]
3539 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,3]
3540 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
3541 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm4[0,1,2,1]
3542 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rsi), %xmm4
3543 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %xmm5
3544 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
3545 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3546 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
3547 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
3548 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%r8), %xmm8
3549 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm8[0],zero,xmm8[1],zero
3550 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm8, %xmm15
3551 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3552 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
3553 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 68(%r9), %ymm7
3554 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3],ymm6[4,5,6,7]
3555 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3556 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rcx), %xmm6
3557 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3558 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,2,2,3]
3559 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdx), %xmm7
3560 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3561 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,2,2,3]
3562 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
3563 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm6[0,1,2,1]
3564 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rsi), %xmm14
3565 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %xmm7
3566 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm7[2],xmm14[2],xmm7[3],xmm14[3]
3567 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3568 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm9
3569 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
3570 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%r8), %xmm6
3571 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3572 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm6[0],zero,xmm6[1],zero
3573 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
3574 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 100(%r9), %ymm9
3575 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1,2],ymm9[3],ymm8[4,5,6,7]
3576 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3577 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd (%rcx), %xmm8
3578 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd (%rdx), %xmm9
3579 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
3580 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3581 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
3582 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3],ymm0[4,5,6,7]
3583 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm11, %ymm1
3584 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
3585 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r9), %xmm1
3586 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3587 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm1, %ymm1
3588 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
3589 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3590 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm0
3591 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3592 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %ymm6
3593 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm8 = ymm6[0,1,2,2,4,5,6,6]
3594 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[1,1,2,3,5,5,6,7]
3595 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2],ymm8[3],ymm9[4],ymm8[5],ymm9[6],ymm8[7]
3596 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm8[2,1,2,3]
3597 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm9
3598 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm8
3599 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[6],ymm8[6],ymm9[7],ymm8[7]
3600 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3601 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm0[4,5],ymm10[6,7]
3602 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm11 = mem[0],zero,mem[1],zero
3603 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
3604 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 20(%r9), %ymm11
3605 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6,7]
3606 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3607 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 32(%rcx), %xmm10
3608 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 32(%rdx), %xmm11
3609 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
3610 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
3611 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
3612 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm10[2,3],ymm2[4,5,6,7]
3613 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm12, %ymm3
3614 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
3615 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r9), %xmm0
3616 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3617 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm0, %ymm3
3618 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
3619 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3620 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %ymm3
3621 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %ymm2
3622 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm10 = ymm2[0,1,2,2,4,5,6,6]
3623 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm11 = ymm3[1,1,2,3,5,5,6,7]
3624 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2],ymm10[3],ymm11[4],ymm10[5],ymm11[6],ymm10[7]
3625 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm12 = ymm10[2,1,2,3]
3626 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm11
3627 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %ymm10
3628 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[6],ymm10[6],ymm11[7],ymm10[7]
3629 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3630 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm0[4,5],ymm12[6,7]
3631 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm13 = mem[0],zero,mem[1],zero
3632 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5,6,7]
3633 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 52(%r9), %ymm13
3634 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm13[3],ymm12[4,5,6,7]
3635 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3636 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 64(%rcx), %xmm12
3637 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 64(%rdx), %xmm13
3638 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
3639 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
3640 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
3641 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm12[2,3],ymm4[4,5,6,7]
3642 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm15, %ymm5
3643 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
3644 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%r9), %xmm0
3645 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
3646 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm0, %ymm5
3647 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm5[5],ymm4[6,7]
3648 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3649 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdx), %ymm5
3650 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rcx), %ymm4
3651 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm12 = ymm4[0,1,2,2,4,5,6,6]
3652 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm13 = ymm5[1,1,2,3,5,5,6,7]
3653 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0],ymm12[1],ymm13[2],ymm12[3],ymm13[4],ymm12[5],ymm13[6],ymm12[7]
3654 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm12[2,1,2,3]
3655 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %ymm13
3656 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rsi), %ymm12
3657 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm15 = ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[6],ymm12[6],ymm13[7],ymm12[7]
3658 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3659 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5],ymm0[6,7]
3660 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
3661 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
3662 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 84(%r9), %ymm15
3663 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3],ymm0[4,5,6,7]
3664 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3665 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3666 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
3667 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
3668 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1]
3669 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,2,1]
3670 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3],ymm7[4,5,6,7]
3671 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
3672 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5],ymm0[6,7]
3673 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%r9), %xmm7
3674 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3675 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm7, %ymm7
3676 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm7[5],ymm0[6,7]
3677 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3678 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdx), %ymm0
3679 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3680 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rcx), %ymm7
3681 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm7[0,1,2,2,4,5,6,6]
3682 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm15 = ymm0[1,1,2,3,5,5,6,7]
3683 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
3684 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
3685 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %ymm1
3686 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3687 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rsi), %ymm0
3688 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3689 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm15 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
3690 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3691 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
3692 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
3693 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
3694 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 116(%r9), %ymm15
3695 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
3696 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3697 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
3698 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload
3699 ; AVX2-FAST-PERLANE-NEXT: # xmm14 = xmm14[2],mem[2],xmm14[3],mem[3]
3700 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm14 = xmm14[2,3,2,3]
3701 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
3702 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm14, %ymm15, %ymm14
3703 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
3704 ; AVX2-FAST-PERLANE-NEXT: # xmm15 = mem[2,2,3,3]
3705 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
3706 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5],ymm15[6,7]
3707 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
3708 ; AVX2-FAST-PERLANE-NEXT: # xmm15 = mem[2,2,3,3]
3709 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
3710 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm15 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6],ymm15[7]
3711 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[4],ymm8[4],ymm9[5],ymm8[5]
3712 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3713 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm9 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[4],ymm6[4],ymm0[5],ymm6[5]
3714 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
3715 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
3716 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
3717 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %ymm9
3718 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
3719 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 16(%r9), %ymm14
3720 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm14[5],ymm8[6,7]
3721 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[6],ymm6[6],ymm0[7],ymm6[7]
3722 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
3723 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3724 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[2,3],ymm1[2,3]
3725 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm6 = ymm9[2,1,3,3,6,5,7,7]
3726 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
3727 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3,4,5],ymm6[6,7]
3728 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm6 = mem[0,2,2,3,4,6,6,7]
3729 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
3730 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0],ymm6[1],ymm1[2,3,4,5,6],ymm6[7]
3731 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3732 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
3733 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
3734 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
3735 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
3736 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm9, %ymm1
3737 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
3738 ; AVX2-FAST-PERLANE-NEXT: # xmm9 = mem[2,2,3,3]
3739 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,2,1]
3740 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm1[2,3,4,5],ymm9[6,7]
3741 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
3742 ; AVX2-FAST-PERLANE-NEXT: # xmm9 = mem[2,2,3,3]
3743 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,2,1]
3744 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4,5,6],ymm9[7]
3745 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm9 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[4],ymm10[4],ymm11[5],ymm10[5]
3746 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm10 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
3747 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
3748 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
3749 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3],ymm9[4,5,6,7]
3750 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %ymm10
3751 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5],ymm9[6,7]
3752 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 48(%r9), %ymm11
3753 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm11[5],ymm9[6,7]
3754 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
3755 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,2,3,6,7,6,7]
3756 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
3757 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[2,3],ymm2[2,3]
3758 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm3 = ymm10[2,1,3,3,6,5,7,7]
3759 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
3760 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5],ymm3[6,7]
3761 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm3 = mem[0,2,2,3,4,6,6,7]
3762 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
3763 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6],ymm3[7]
3764 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3765 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
3766 ; AVX2-FAST-PERLANE-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
3767 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,3,2,3]
3768 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3769 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm3, %ymm10, %ymm3
3770 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
3771 ; AVX2-FAST-PERLANE-NEXT: # xmm10 = mem[2,2,3,3]
3772 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,2,1]
3773 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm10[0,1],ymm3[2,3,4,5],ymm10[6,7]
3774 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, (%rsp), %xmm10 # 16-byte Folded Reload
3775 ; AVX2-FAST-PERLANE-NEXT: # xmm10 = mem[2,2,3,3]
3776 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,2,1]
3777 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm10[1],ymm3[2,3,4,5,6],ymm10[7]
3778 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm10 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[4],ymm12[4],ymm13[5],ymm12[5]
3779 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm11 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[4],ymm4[4],ymm5[5],ymm4[5]
3780 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
3781 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
3782 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
3783 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%r8), %ymm11
3784 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
3785 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 80(%r9), %ymm12
3786 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5],ymm10[6,7]
3787 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm4 = ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[6],ymm4[6],ymm5[7],ymm4[7]
3788 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
3789 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
3790 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[2,3],ymm4[2,3]
3791 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm5 = ymm11[2,1,3,3,6,5,7,7]
3792 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
3793 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3,4,5],ymm5[6,7]
3794 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm5 = mem[0,2,2,3,4,6,6,7]
3795 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
3796 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4,5,6],ymm5[7]
3797 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3798 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
3799 ; AVX2-FAST-PERLANE-NEXT: # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3]
3800 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm5 = xmm5[2,3,2,3]
3801 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
3802 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm5, %ymm11, %ymm5
3803 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
3804 ; AVX2-FAST-PERLANE-NEXT: # xmm11 = mem[2,2,3,3]
3805 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
3806 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm11[0,1],ymm5[2,3,4,5],ymm11[6,7]
3807 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
3808 ; AVX2-FAST-PERLANE-NEXT: # xmm11 = mem[2,2,3,3]
3809 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
3810 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm11[1],ymm5[2,3,4,5,6],ymm11[7]
3811 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3812 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
3813 ; AVX2-FAST-PERLANE-NEXT: # ymm11 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
3814 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3815 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm12 = ymm0[0],ymm7[0],ymm0[1],ymm7[1],ymm0[4],ymm7[4],ymm0[5],ymm7[5]
3816 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
3817 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,2,3]
3818 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
3819 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%r8), %ymm12
3820 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
3821 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 112(%r9), %ymm13
3822 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm13[5],ymm11[6,7]
3823 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm7[2],ymm0[3],ymm7[3],ymm0[6],ymm7[6],ymm0[7],ymm7[7]
3824 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
3825 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3826 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
3827 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm7 = ymm12[2,1,3,3,6,5,7,7]
3828 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
3829 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3,4,5],ymm7[6,7]
3830 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm7 = mem[0,2,2,3,4,6,6,7]
3831 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
3832 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2,3,4,5,6],ymm7[7]
3833 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
3834 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 736(%rax)
3835 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm11, 672(%rax)
3836 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 640(%rax)
3837 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm4, 544(%rax)
3838 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm10, 480(%rax)
3839 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 448(%rax)
3840 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, 352(%rax)
3841 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm9, 288(%rax)
3842 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 256(%rax)
3843 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm6, 160(%rax)
3844 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm8, 96(%rax)
3845 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm15, 64(%rax)
3846 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3847 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 704(%rax)
3848 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3849 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 576(%rax)
3850 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3851 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 512(%rax)
3852 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3853 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 384(%rax)
3854 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3855 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 320(%rax)
3856 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3857 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 192(%rax)
3858 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3859 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%rax)
3860 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3861 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
3862 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3863 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 608(%rax)
3864 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3865 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 416(%rax)
3866 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3867 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax)
3868 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3869 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
3870 ; AVX2-FAST-PERLANE-NEXT: addq $904, %rsp # imm = 0x388
3871 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
3872 ; AVX2-FAST-PERLANE-NEXT: retq
3874 ; AVX512F-SLOW-LABEL: store_i32_stride6_vf32:
3875 ; AVX512F-SLOW: # %bb.0:
3876 ; AVX512F-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
3877 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm5
3878 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rdi), %zmm17
3879 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rsi), %zmm18
3880 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rsi), %zmm7
3881 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rdx), %zmm1
3882 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rdx), %zmm0
3883 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rcx), %zmm6
3884 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rcx), %zmm10
3885 ; AVX512F-SLOW-NEXT: vmovdqa64 (%r8), %zmm3
3886 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%r8), %zmm8
3887 ; AVX512F-SLOW-NEXT: vmovdqa64 (%r9), %zmm4
3888 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%r9), %zmm9
3889 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
3890 ; AVX512F-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
3891 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
3892 ; AVX512F-SLOW-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
3893 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm5, %zmm11
3894 ; AVX512F-SLOW-NEXT: vpermt2d %zmm7, %zmm12, %zmm11
3895 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
3896 ; AVX512F-SLOW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
3897 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm5, %zmm13
3898 ; AVX512F-SLOW-NEXT: vpermt2d %zmm7, %zmm14, %zmm13
3899 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
3900 ; AVX512F-SLOW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
3901 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm5, %zmm15
3902 ; AVX512F-SLOW-NEXT: vpermt2d %zmm7, %zmm16, %zmm15
3903 ; AVX512F-SLOW-NEXT: vpermi2d %zmm18, %zmm17, %zmm12
3904 ; AVX512F-SLOW-NEXT: vpermi2d %zmm18, %zmm17, %zmm14
3905 ; AVX512F-SLOW-NEXT: vpermi2d %zmm18, %zmm17, %zmm16
3906 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm19 = zmm17[2],zmm18[2],zmm17[3],zmm18[3],zmm17[6],zmm18[6],zmm17[7],zmm18[7],zmm17[10],zmm18[10],zmm17[11],zmm18[11],zmm17[14],zmm18[14],zmm17[15],zmm18[15]
3907 ; AVX512F-SLOW-NEXT: vpermt2d %zmm18, %zmm2, %zmm17
3908 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rdx), %ymm18
3909 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rdx), %ymm20
3910 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} ymm21 = [3,11,0,8,7,15,4,12]
3911 ; AVX512F-SLOW-NEXT: vpermt2d (%rcx), %ymm21, %ymm18
3912 ; AVX512F-SLOW-NEXT: movb $36, %dl
3913 ; AVX512F-SLOW-NEXT: kmovw %edx, %k1
3914 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm18[0,1,0,1,2,3,6,7]
3915 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm18 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
3916 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm18, %zmm17
3917 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm22 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
3918 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm22, %zmm17
3919 ; AVX512F-SLOW-NEXT: vpermi2d %zmm7, %zmm5, %zmm2
3920 ; AVX512F-SLOW-NEXT: vpermt2d 64(%rcx), %ymm21, %ymm20
3921 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm20[0,1,0,1,2,3,6,7]
3922 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm18, %zmm2
3923 ; AVX512F-SLOW-NEXT: vpermt2d %zmm9, %zmm22, %zmm2
3924 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm18 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
3925 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm20
3926 ; AVX512F-SLOW-NEXT: vpermt2d %zmm10, %zmm18, %zmm20
3927 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm20, %zmm11 {%k1}
3928 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm20 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
3929 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm20, %zmm11
3930 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm21 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
3931 ; AVX512F-SLOW-NEXT: vpermt2d %zmm9, %zmm21, %zmm11
3932 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
3933 ; AVX512F-SLOW-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
3934 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm23
3935 ; AVX512F-SLOW-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
3936 ; AVX512F-SLOW-NEXT: movb $-110, %cl
3937 ; AVX512F-SLOW-NEXT: kmovw %ecx, %k2
3938 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm23, %zmm13 {%k2}
3939 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm23 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
3940 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm23, %zmm13
3941 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
3942 ; AVX512F-SLOW-NEXT: vpermt2d %zmm9, %zmm24, %zmm13
3943 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
3944 ; AVX512F-SLOW-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
3945 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm26
3946 ; AVX512F-SLOW-NEXT: vpermt2d %zmm10, %zmm25, %zmm26
3947 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm26, %zmm15 {%k2}
3948 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm26 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
3949 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm26, %zmm15
3950 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm27 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
3951 ; AVX512F-SLOW-NEXT: vpermt2d %zmm9, %zmm27, %zmm15
3952 ; AVX512F-SLOW-NEXT: vpermi2d %zmm6, %zmm1, %zmm18
3953 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm18, %zmm12 {%k1}
3954 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm20, %zmm12
3955 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm21, %zmm12
3956 ; AVX512F-SLOW-NEXT: vpermi2d %zmm6, %zmm1, %zmm22
3957 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm22, %zmm14 {%k2}
3958 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm23, %zmm14
3959 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm24, %zmm14
3960 ; AVX512F-SLOW-NEXT: vpermi2d %zmm6, %zmm1, %zmm25
3961 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm25, %zmm16 {%k2}
3962 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm26, %zmm16
3963 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm27, %zmm16
3964 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm18 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
3965 ; AVX512F-SLOW-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3]
3966 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm20
3967 ; AVX512F-SLOW-NEXT: vpermt2d %zmm10, %zmm18, %zmm20
3968 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rdi), %ymm21
3969 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rdi), %ymm22
3970 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm22 = ymm22[2],mem[2],ymm22[3],mem[3],ymm22[6],mem[6],ymm22[7],mem[7]
3971 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm20 {%k1} = zmm22[2,3,2,3,2,3,2,3]
3972 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm22 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
3973 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm22, %zmm20
3974 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm23 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
3975 ; AVX512F-SLOW-NEXT: vpermt2d %zmm9, %zmm23, %zmm20
3976 ; AVX512F-SLOW-NEXT: vpermi2d %zmm6, %zmm1, %zmm18
3977 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm21 = ymm21[2],mem[2],ymm21[3],mem[3],ymm21[6],mem[6],ymm21[7],mem[7]
3978 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm21[2,3,2,3,2,3,2,3]
3979 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm22, %zmm18
3980 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm23, %zmm18
3981 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
3982 ; AVX512F-SLOW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
3983 ; AVX512F-SLOW-NEXT: vpermt2d %zmm10, %zmm21, %zmm0
3984 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm5 = zmm5[2],zmm7[2],zmm5[3],zmm7[3],zmm5[6],zmm7[6],zmm5[7],zmm7[7],zmm5[10],zmm7[10],zmm5[11],zmm7[11],zmm5[14],zmm7[14],zmm5[15],zmm7[15]
3985 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm5[6,7,6,7,6,7,6,7]
3986 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm5 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
3987 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm5, %zmm0
3988 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
3989 ; AVX512F-SLOW-NEXT: vpermt2d %zmm9, %zmm7, %zmm0
3990 ; AVX512F-SLOW-NEXT: vpermt2d %zmm6, %zmm21, %zmm1
3991 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm19[6,7,6,7,6,7,6,7]
3992 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm5, %zmm1
3993 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm7, %zmm1
3994 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm16, (%rax)
3995 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm14, 192(%rax)
3996 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm1, 320(%rax)
3997 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm12, 256(%rax)
3998 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm15, 384(%rax)
3999 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm13, 576(%rax)
4000 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, 704(%rax)
4001 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm11, 640(%rax)
4002 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm18, 128(%rax)
4003 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm2, 448(%rax)
4004 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm20, 512(%rax)
4005 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm17, 64(%rax)
4006 ; AVX512F-SLOW-NEXT: vzeroupper
4007 ; AVX512F-SLOW-NEXT: retq
4009 ; AVX512F-FAST-LABEL: store_i32_stride6_vf32:
4010 ; AVX512F-FAST: # %bb.0:
4011 ; AVX512F-FAST-NEXT: vmovdqa64 (%rdi), %zmm1
4012 ; AVX512F-FAST-NEXT: vmovdqa64 64(%rdi), %zmm0
4013 ; AVX512F-FAST-NEXT: vmovdqa64 (%rsi), %zmm11
4014 ; AVX512F-FAST-NEXT: vmovdqa64 64(%rsi), %zmm13
4015 ; AVX512F-FAST-NEXT: vmovdqa64 (%rdx), %zmm4
4016 ; AVX512F-FAST-NEXT: vmovdqa64 64(%rdx), %zmm7
4017 ; AVX512F-FAST-NEXT: vmovdqa64 (%rcx), %zmm18
4018 ; AVX512F-FAST-NEXT: vmovdqa64 64(%rcx), %zmm24
4019 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
4020 ; AVX512F-FAST-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
4021 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm4, %zmm14
4022 ; AVX512F-FAST-NEXT: vpermt2d %zmm18, %zmm26, %zmm14
4023 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
4024 ; AVX512F-FAST-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
4025 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm1, %zmm2
4026 ; AVX512F-FAST-NEXT: vpermt2d %zmm11, %zmm19, %zmm2
4027 ; AVX512F-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm12 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
4028 ; AVX512F-FAST-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
4029 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm20
4030 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm12, %zmm20
4031 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
4032 ; AVX512F-FAST-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
4033 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm3
4034 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm5, %zmm3
4035 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm17 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
4036 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm22
4037 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm17, %zmm22
4038 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
4039 ; AVX512F-FAST-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
4040 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm6
4041 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm8, %zmm6
4042 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
4043 ; AVX512F-FAST-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
4044 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm23
4045 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm21, %zmm23
4046 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
4047 ; AVX512F-FAST-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
4048 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm9
4049 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm10, %zmm9
4050 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
4051 ; AVX512F-FAST-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
4052 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm15
4053 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm16, %zmm15
4054 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm25
4055 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm26, %zmm7
4056 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm26 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
4057 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm26, %zmm25
4058 ; AVX512F-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
4059 ; AVX512F-FAST-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
4060 ; AVX512F-FAST-NEXT: vpermi2d %zmm18, %zmm4, %zmm5
4061 ; AVX512F-FAST-NEXT: vpermi2d %zmm18, %zmm4, %zmm17
4062 ; AVX512F-FAST-NEXT: vpermi2d %zmm18, %zmm4, %zmm21
4063 ; AVX512F-FAST-NEXT: vpermi2d %zmm18, %zmm4, %zmm16
4064 ; AVX512F-FAST-NEXT: vpermt2d %zmm18, %zmm26, %zmm4
4065 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm26
4066 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm24, %zmm26
4067 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm18
4068 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm19, %zmm0
4069 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
4070 ; AVX512F-FAST-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
4071 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
4072 ; AVX512F-FAST-NEXT: vpermi2d %zmm11, %zmm1, %zmm12
4073 ; AVX512F-FAST-NEXT: vpermi2d %zmm11, %zmm1, %zmm8
4074 ; AVX512F-FAST-NEXT: vpermi2d %zmm11, %zmm1, %zmm10
4075 ; AVX512F-FAST-NEXT: vpermi2d %zmm11, %zmm1, %zmm24
4076 ; AVX512F-FAST-NEXT: vpermt2d %zmm11, %zmm19, %zmm1
4077 ; AVX512F-FAST-NEXT: movb $-110, %al
4078 ; AVX512F-FAST-NEXT: kmovw %eax, %k2
4079 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm14, %zmm2 {%k2}
4080 ; AVX512F-FAST-NEXT: vmovdqa64 (%r8), %zmm11
4081 ; AVX512F-FAST-NEXT: movb $36, %al
4082 ; AVX512F-FAST-NEXT: kmovw %eax, %k1
4083 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm20, %zmm3 {%k1}
4084 ; AVX512F-FAST-NEXT: vmovdqa64 64(%r8), %zmm13
4085 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm22, %zmm6 {%k1}
4086 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm14 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
4087 ; AVX512F-FAST-NEXT: vpermt2d %zmm11, %zmm14, %zmm2
4088 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm23, %zmm9 {%k2}
4089 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm19 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
4090 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm19, %zmm3
4091 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm26, %zmm15 {%k1}
4092 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm20 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
4093 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm20, %zmm6
4094 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm25, %zmm18 {%k1}
4095 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm22 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
4096 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm22, %zmm9
4097 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm0 {%k2}
4098 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
4099 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm7, %zmm15
4100 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm14, %zmm0
4101 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm14 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
4102 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm14, %zmm18
4103 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm12, %zmm5 {%k1}
4104 ; AVX512F-FAST-NEXT: vpermt2d %zmm11, %zmm19, %zmm5
4105 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm17, %zmm8 {%k1}
4106 ; AVX512F-FAST-NEXT: vpermt2d %zmm11, %zmm20, %zmm8
4107 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm21, %zmm10 {%k2}
4108 ; AVX512F-FAST-NEXT: vmovdqa64 (%r9), %zmm12
4109 ; AVX512F-FAST-NEXT: vpermt2d %zmm11, %zmm22, %zmm10
4110 ; AVX512F-FAST-NEXT: vmovdqa64 64(%r9), %zmm13
4111 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm24, %zmm16 {%k1}
4112 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
4113 ; AVX512F-FAST-NEXT: vpermt2d %zmm12, %zmm17, %zmm2
4114 ; AVX512F-FAST-NEXT: vpermt2d %zmm11, %zmm7, %zmm16
4115 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
4116 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm7, %zmm3
4117 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm4, %zmm1 {%k1}
4118 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
4119 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm4, %zmm6
4120 ; AVX512F-FAST-NEXT: vpermt2d %zmm11, %zmm14, %zmm1
4121 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
4122 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm11, %zmm9
4123 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
4124 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm14, %zmm15
4125 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm17, %zmm0
4126 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm17 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
4127 ; AVX512F-FAST-NEXT: vpermt2d %zmm13, %zmm17, %zmm18
4128 ; AVX512F-FAST-NEXT: vpermt2d %zmm12, %zmm7, %zmm5
4129 ; AVX512F-FAST-NEXT: vpermt2d %zmm12, %zmm4, %zmm8
4130 ; AVX512F-FAST-NEXT: vpermt2d %zmm12, %zmm11, %zmm10
4131 ; AVX512F-FAST-NEXT: vpermt2d %zmm12, %zmm14, %zmm16
4132 ; AVX512F-FAST-NEXT: vpermt2d %zmm12, %zmm17, %zmm1
4133 ; AVX512F-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
4134 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm1, 64(%rax)
4135 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm16, 128(%rax)
4136 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm10, 192(%rax)
4137 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm8, 256(%rax)
4138 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm5, 320(%rax)
4139 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, 384(%rax)
4140 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm18, 448(%rax)
4141 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm15, 512(%rax)
4142 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm9, 576(%rax)
4143 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm6, 640(%rax)
4144 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm3, 704(%rax)
4145 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm2, (%rax)
4146 ; AVX512F-FAST-NEXT: vzeroupper
4147 ; AVX512F-FAST-NEXT: retq
4149 ; AVX512BW-SLOW-LABEL: store_i32_stride6_vf32:
4150 ; AVX512BW-SLOW: # %bb.0:
4151 ; AVX512BW-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
4152 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm5
4153 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm17
4154 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rsi), %zmm18
4155 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rsi), %zmm7
4156 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdx), %zmm1
4157 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rdx), %zmm0
4158 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rcx), %zmm6
4159 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rcx), %zmm10
4160 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%r8), %zmm3
4161 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%r8), %zmm8
4162 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%r9), %zmm4
4163 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%r9), %zmm9
4164 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
4165 ; AVX512BW-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
4166 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
4167 ; AVX512BW-SLOW-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
4168 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm5, %zmm11
4169 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm7, %zmm12, %zmm11
4170 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
4171 ; AVX512BW-SLOW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
4172 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm5, %zmm13
4173 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm7, %zmm14, %zmm13
4174 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
4175 ; AVX512BW-SLOW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
4176 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm5, %zmm15
4177 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm7, %zmm16, %zmm15
4178 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm18, %zmm17, %zmm12
4179 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm18, %zmm17, %zmm14
4180 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm18, %zmm17, %zmm16
4181 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm19 = zmm17[2],zmm18[2],zmm17[3],zmm18[3],zmm17[6],zmm18[6],zmm17[7],zmm18[7],zmm17[10],zmm18[10],zmm17[11],zmm18[11],zmm17[14],zmm18[14],zmm17[15],zmm18[15]
4182 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm18, %zmm2, %zmm17
4183 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdx), %ymm18
4184 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rdx), %ymm20
4185 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} ymm21 = [3,11,0,8,7,15,4,12]
4186 ; AVX512BW-SLOW-NEXT: vpermt2d (%rcx), %ymm21, %ymm18
4187 ; AVX512BW-SLOW-NEXT: movb $36, %dl
4188 ; AVX512BW-SLOW-NEXT: kmovd %edx, %k1
4189 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm18[0,1,0,1,2,3,6,7]
4190 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm18 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
4191 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm18, %zmm17
4192 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm22 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
4193 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm22, %zmm17
4194 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm7, %zmm5, %zmm2
4195 ; AVX512BW-SLOW-NEXT: vpermt2d 64(%rcx), %ymm21, %ymm20
4196 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm20[0,1,0,1,2,3,6,7]
4197 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm18, %zmm2
4198 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm9, %zmm22, %zmm2
4199 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm18 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
4200 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm20
4201 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm10, %zmm18, %zmm20
4202 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm20, %zmm11 {%k1}
4203 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm20 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
4204 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm20, %zmm11
4205 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm21 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
4206 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm9, %zmm21, %zmm11
4207 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
4208 ; AVX512BW-SLOW-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
4209 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm23
4210 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm10, %zmm22, %zmm23
4211 ; AVX512BW-SLOW-NEXT: movb $-110, %cl
4212 ; AVX512BW-SLOW-NEXT: kmovd %ecx, %k2
4213 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm23, %zmm13 {%k2}
4214 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm23 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
4215 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm23, %zmm13
4216 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
4217 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm9, %zmm24, %zmm13
4218 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
4219 ; AVX512BW-SLOW-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3]
4220 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm26
4221 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm10, %zmm25, %zmm26
4222 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm26, %zmm15 {%k2}
4223 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm26 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
4224 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm26, %zmm15
4225 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm27 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
4226 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm9, %zmm27, %zmm15
4227 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm6, %zmm1, %zmm18
4228 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm18, %zmm12 {%k1}
4229 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm20, %zmm12
4230 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm21, %zmm12
4231 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm6, %zmm1, %zmm22
4232 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm22, %zmm14 {%k2}
4233 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm23, %zmm14
4234 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm24, %zmm14
4235 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm6, %zmm1, %zmm25
4236 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm25, %zmm16 {%k2}
4237 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm26, %zmm16
4238 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm27, %zmm16
4239 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm18 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
4240 ; AVX512BW-SLOW-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3]
4241 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm20
4242 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm10, %zmm18, %zmm20
4243 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %ymm21
4244 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rdi), %ymm22
4245 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm22 = ymm22[2],mem[2],ymm22[3],mem[3],ymm22[6],mem[6],ymm22[7],mem[7]
4246 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm20 {%k1} = zmm22[2,3,2,3,2,3,2,3]
4247 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm22 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
4248 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm22, %zmm20
4249 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm23 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
4250 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm9, %zmm23, %zmm20
4251 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm6, %zmm1, %zmm18
4252 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm21 = ymm21[2],mem[2],ymm21[3],mem[3],ymm21[6],mem[6],ymm21[7],mem[7]
4253 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm21[2,3,2,3,2,3,2,3]
4254 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm22, %zmm18
4255 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm23, %zmm18
4256 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
4257 ; AVX512BW-SLOW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
4258 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm10, %zmm21, %zmm0
4259 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm5 = zmm5[2],zmm7[2],zmm5[3],zmm7[3],zmm5[6],zmm7[6],zmm5[7],zmm7[7],zmm5[10],zmm7[10],zmm5[11],zmm7[11],zmm5[14],zmm7[14],zmm5[15],zmm7[15]
4260 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm5[6,7,6,7,6,7,6,7]
4261 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm5 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
4262 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm5, %zmm0
4263 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
4264 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm9, %zmm7, %zmm0
4265 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm6, %zmm21, %zmm1
4266 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm19[6,7,6,7,6,7,6,7]
4267 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm5, %zmm1
4268 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm7, %zmm1
4269 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm16, (%rax)
4270 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm14, 192(%rax)
4271 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm1, 320(%rax)
4272 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm12, 256(%rax)
4273 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm15, 384(%rax)
4274 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm13, 576(%rax)
4275 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, 704(%rax)
4276 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm11, 640(%rax)
4277 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm18, 128(%rax)
4278 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm2, 448(%rax)
4279 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm20, 512(%rax)
4280 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm17, 64(%rax)
4281 ; AVX512BW-SLOW-NEXT: vzeroupper
4282 ; AVX512BW-SLOW-NEXT: retq
4283 ;
4284 ; AVX512BW-FAST-LABEL: store_i32_stride6_vf32:
4285 ; AVX512BW-FAST: # %bb.0:
4286 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm1
4287 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%rdi), %zmm0
4288 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rsi), %zmm11
4289 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%rsi), %zmm13
4290 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdx), %zmm4
4291 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%rdx), %zmm7
4292 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rcx), %zmm18
4293 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%rcx), %zmm24
4294 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
4295 ; AVX512BW-FAST-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
4296 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm4, %zmm14
4297 ; AVX512BW-FAST-NEXT: vpermt2d %zmm18, %zmm26, %zmm14
4298 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
4299 ; AVX512BW-FAST-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
4300 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm1, %zmm2
4301 ; AVX512BW-FAST-NEXT: vpermt2d %zmm11, %zmm19, %zmm2
4302 ; AVX512BW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm12 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
4303 ; AVX512BW-FAST-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
4304 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm20
4305 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm12, %zmm20
4306 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
4307 ; AVX512BW-FAST-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
4308 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm3
4309 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm5, %zmm3
4310 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm17 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
4311 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm22
4312 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm17, %zmm22
4313 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
4314 ; AVX512BW-FAST-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
4315 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm6
4316 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm8, %zmm6
4317 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
4318 ; AVX512BW-FAST-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
4319 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm23
4320 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm21, %zmm23
4321 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
4322 ; AVX512BW-FAST-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
4323 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm9
4324 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm10, %zmm9
4325 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
4326 ; AVX512BW-FAST-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
4327 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm15
4328 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm16, %zmm15
4329 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm25
4330 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm26, %zmm7
4331 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm26 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
4332 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm26, %zmm25
4333 ; AVX512BW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm24 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
4334 ; AVX512BW-FAST-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
4335 ; AVX512BW-FAST-NEXT: vpermi2d %zmm18, %zmm4, %zmm5
4336 ; AVX512BW-FAST-NEXT: vpermi2d %zmm18, %zmm4, %zmm17
4337 ; AVX512BW-FAST-NEXT: vpermi2d %zmm18, %zmm4, %zmm21
4338 ; AVX512BW-FAST-NEXT: vpermi2d %zmm18, %zmm4, %zmm16
4339 ; AVX512BW-FAST-NEXT: vpermt2d %zmm18, %zmm26, %zmm4
4340 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm26
4341 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm24, %zmm26
4342 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm18
4343 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm19, %zmm0
4344 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
4345 ; AVX512BW-FAST-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
4346 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm19, %zmm18
4347 ; AVX512BW-FAST-NEXT: vpermi2d %zmm11, %zmm1, %zmm12
4348 ; AVX512BW-FAST-NEXT: vpermi2d %zmm11, %zmm1, %zmm8
4349 ; AVX512BW-FAST-NEXT: vpermi2d %zmm11, %zmm1, %zmm10
4350 ; AVX512BW-FAST-NEXT: vpermi2d %zmm11, %zmm1, %zmm24
4351 ; AVX512BW-FAST-NEXT: vpermt2d %zmm11, %zmm19, %zmm1
4352 ; AVX512BW-FAST-NEXT: movb $-110, %al
4353 ; AVX512BW-FAST-NEXT: kmovd %eax, %k2
4354 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm14, %zmm2 {%k2}
4355 ; AVX512BW-FAST-NEXT: vmovdqa64 (%r8), %zmm11
4356 ; AVX512BW-FAST-NEXT: movb $36, %al
4357 ; AVX512BW-FAST-NEXT: kmovd %eax, %k1
4358 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm20, %zmm3 {%k1}
4359 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%r8), %zmm13
4360 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm22, %zmm6 {%k1}
4361 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm14 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
4362 ; AVX512BW-FAST-NEXT: vpermt2d %zmm11, %zmm14, %zmm2
4363 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm23, %zmm9 {%k2}
4364 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm19 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
4365 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm19, %zmm3
4366 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm26, %zmm15 {%k1}
4367 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm20 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
4368 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm20, %zmm6
4369 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm25, %zmm18 {%k1}
4370 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm22 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
4371 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm22, %zmm9
4372 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm0 {%k2}
4373 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
4374 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm7, %zmm15
4375 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm14, %zmm0
4376 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm14 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
4377 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm14, %zmm18
4378 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm12, %zmm5 {%k1}
4379 ; AVX512BW-FAST-NEXT: vpermt2d %zmm11, %zmm19, %zmm5
4380 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm17, %zmm8 {%k1}
4381 ; AVX512BW-FAST-NEXT: vpermt2d %zmm11, %zmm20, %zmm8
4382 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm21, %zmm10 {%k2}
4383 ; AVX512BW-FAST-NEXT: vmovdqa64 (%r9), %zmm12
4384 ; AVX512BW-FAST-NEXT: vpermt2d %zmm11, %zmm22, %zmm10
4385 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%r9), %zmm13
4386 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm24, %zmm16 {%k1}
4387 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
4388 ; AVX512BW-FAST-NEXT: vpermt2d %zmm12, %zmm17, %zmm2
4389 ; AVX512BW-FAST-NEXT: vpermt2d %zmm11, %zmm7, %zmm16
4390 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
4391 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm7, %zmm3
4392 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm4, %zmm1 {%k1}
4393 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
4394 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm4, %zmm6
4395 ; AVX512BW-FAST-NEXT: vpermt2d %zmm11, %zmm14, %zmm1
4396 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
4397 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm11, %zmm9
4398 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
4399 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm14, %zmm15
4400 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm17, %zmm0
4401 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm17 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
4402 ; AVX512BW-FAST-NEXT: vpermt2d %zmm13, %zmm17, %zmm18
4403 ; AVX512BW-FAST-NEXT: vpermt2d %zmm12, %zmm7, %zmm5
4404 ; AVX512BW-FAST-NEXT: vpermt2d %zmm12, %zmm4, %zmm8
4405 ; AVX512BW-FAST-NEXT: vpermt2d %zmm12, %zmm11, %zmm10
4406 ; AVX512BW-FAST-NEXT: vpermt2d %zmm12, %zmm14, %zmm16
4407 ; AVX512BW-FAST-NEXT: vpermt2d %zmm12, %zmm17, %zmm1
4408 ; AVX512BW-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
4409 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm1, 64(%rax)
4410 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm16, 128(%rax)
4411 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm10, 192(%rax)
4412 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm8, 256(%rax)
4413 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, 320(%rax)
4414 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, 384(%rax)
4415 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm18, 448(%rax)
4416 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm15, 512(%rax)
4417 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm9, 576(%rax)
4418 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm6, 640(%rax)
4419 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm3, 704(%rax)
4420 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm2, (%rax)
4421 ; AVX512BW-FAST-NEXT: vzeroupper
4422 ; AVX512BW-FAST-NEXT: retq
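; The IR below concatenates the six <32 x i32> inputs and interleaves them with stride 6 into one <192 x i32> vector, which is then stored as a whole.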
4423 %in.vec0 = load <32 x i32>, ptr %in.vecptr0, align 64
4424 %in.vec1 = load <32 x i32>, ptr %in.vecptr1, align 64
4425 %in.vec2 = load <32 x i32>, ptr %in.vecptr2, align 64
4426 %in.vec3 = load <32 x i32>, ptr %in.vecptr3, align 64
4427 %in.vec4 = load <32 x i32>, ptr %in.vecptr4, align 64
4428 %in.vec5 = load <32 x i32>, ptr %in.vecptr5, align 64
4429 %1 = shufflevector <32 x i32> %in.vec0, <32 x i32> %in.vec1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
4430 %2 = shufflevector <32 x i32> %in.vec2, <32 x i32> %in.vec3, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
4431 %3 = shufflevector <32 x i32> %in.vec4, <32 x i32> %in.vec5, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
4432 %4 = shufflevector <64 x i32> %1, <64 x i32> %2, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
4433 %5 = shufflevector <64 x i32> %3, <64 x i32> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
4434 %6 = shufflevector <128 x i32> %4, <128 x i32> %5, <192 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191>
4435 %interleaved.vec = shufflevector <192 x i32> %6, <192 x i32> poison, <192 x i32> <i32 0, i32 32, i32 64, i32 96, i32 128, i32 160, i32 1, i32 33, i32 65, i32 97, i32 129, i32 161, i32 2, i32 34, i32 66, i32 98, i32 130, i32 162, i32 3, i32 35, i32 67, i32 99, i32 131, i32 163, i32 4, i32 36, i32 68, i32 100, i32 132, i32 164, i32 5, i32 37, i32 69, i32 101, i32 133, i32 165, i32 6, i32 38, i32 70, i32 102, i32 134, i32 166, i32 7, i32 39, i32 71, i32 103, i32 135, i32 167, i32 8, i32 40, i32 72, i32 104, i32 136, i32 168, i32 9, i32 41, i32 73, i32 105, i32 137, i32 169, i32 10, i32 42, i32 74, i32 106, i32 138, i32 170, i32 11, i32 43, i32 75, i32 107, i32 139, i32 171, i32 12, i32 44, i32 76, i32 108, i32 140, i32 172, i32 13, i32 45, i32 77, i32 109, i32 141, i32 173, i32 14, i32 46, i32 78, i32 110, i32 142, i32 174, i32 15, i32 47, i32 79, i32 111, i32 143, i32 175, i32 16, i32 48, i32 80, i32 112, i32 144, i32 176, i32 17, i32 49, i32 81, i32 113, i32 145, i32 177, i32 18, i32 50, i32 82, i32 114, i32 146, i32 178, i32 19, i32 51, i32 83, i32 115, i32 147, i32 179, i32 20, i32 52, i32 84, i32 116, i32 148, i32 180, i32 21, i32 53, i32 85, i32 117, i32 149, i32 181, i32 22, i32 54, i32 86, i32 118, i32 150, i32 182, i32 23, i32 55, i32 87, i32 119, i32 151, i32 183, i32 24, i32 56, i32 88, i32 120, i32 152, i32 184, i32 25, i32 57, i32 89, i32 121, i32 153, i32 185, i32 26, i32 58, i32 90, i32 122, i32 154, i32 186, i32 27, i32 59, i32 91, i32 123, i32 155, i32 187, i32 28, i32 60, i32 92, i32 124, i32 156, i32 188, i32 29, i32 61, i32 93, i32 125, i32 157, i32 189, i32 30, i32 62, i32 94, i32 126, i32 158, i32 190, i32 31, i32 63, i32 95, i32 127, i32 159, i32 191>
4436 store <192 x i32> %interleaved.vec, ptr %out.vec, align 64
4437 ret void
4438 }
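; The vf64 variant below repeats the same stride-6 interleaving pattern for six 64-element inputs.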
4440 define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %out.vec) nounwind {
4441 ; SSE-LABEL: store_i32_stride6_vf64:
4442 ; SSE: # %bb.0:
4443 ; SSE-NEXT: subq $1224, %rsp # imm = 0x4C8
4444 ; SSE-NEXT: movaps (%rdi), %xmm9
4445 ; SSE-NEXT: movaps 16(%rdi), %xmm10
4446 ; SSE-NEXT: movaps (%rsi), %xmm2
4447 ; SSE-NEXT: movaps 16(%rsi), %xmm0
4448 ; SSE-NEXT: movaps (%rdx), %xmm11
4449 ; SSE-NEXT: movaps 16(%rdx), %xmm12
4450 ; SSE-NEXT: movaps (%rcx), %xmm4
4451 ; SSE-NEXT: movaps 16(%rcx), %xmm1
4452 ; SSE-NEXT: movaps (%r8), %xmm6
4453 ; SSE-NEXT: movaps 16(%r8), %xmm3
4454 ; SSE-NEXT: movaps (%r9), %xmm7
4455 ; SSE-NEXT: movaps 16(%r9), %xmm5
4456 ; SSE-NEXT: movaps %xmm11, %xmm13
4457 ; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1]
4458 ; SSE-NEXT: movaps %xmm9, %xmm8
4459 ; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1]
4460 ; SSE-NEXT: movaps %xmm7, %xmm14
4461 ; SSE-NEXT: movlhps {{.*#+}} xmm14 = xmm14[0],xmm6[0]
4462 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0],xmm8[2,3]
4463 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4464 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm13[0]
4465 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4466 ; SSE-NEXT: movaps %xmm6, %xmm8
4467 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm7[1,1]
4468 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,3],xmm8[0,2]
4469 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4470 ; SSE-NEXT: movaps %xmm4, %xmm8
4471 ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm11[1]
4472 ; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm2[2],xmm9[3],xmm2[3]
4473 ; SSE-NEXT: movaps %xmm6, %xmm2
4474 ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm7[1]
4475 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm9[2,3]
4476 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4477 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm8[2,0]
4478 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4479 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm7[3,3]
4480 ; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm4[2],xmm11[3],xmm4[3]
4481 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,3],xmm6[0,2]
4482 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4483 ; SSE-NEXT: movaps %xmm12, %xmm4
4484 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4485 ; SSE-NEXT: movaps %xmm10, %xmm2
4486 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
4487 ; SSE-NEXT: movaps %xmm5, %xmm6
4488 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm3[0]
4489 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm2[2,3]
4490 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4491 ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
4492 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4493 ; SSE-NEXT: movaps %xmm3, %xmm2
4494 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm5[1,1]
4495 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,3],xmm2[0,2]
4496 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4497 ; SSE-NEXT: movaps %xmm1, %xmm2
4498 ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm12[1]
4499 ; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm0[2],xmm10[3],xmm0[3]
4500 ; SSE-NEXT: movaps %xmm3, %xmm0
4501 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm5[1]
4502 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm10[2,3]
4503 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4504 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,1],xmm2[2,0]
4505 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4506 ; SSE-NEXT: movaps 32(%rdi), %xmm6
4507 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm5[3,3]
4508 ; SSE-NEXT: movaps 32(%rdx), %xmm5
4509 ; SSE-NEXT: unpckhps {{.*#+}} xmm12 = xmm12[2],xmm1[2],xmm12[3],xmm1[3]
4510 ; SSE-NEXT: movaps 32(%rcx), %xmm0
4511 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,3],xmm3[0,2]
4512 ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4513 ; SSE-NEXT: movaps %xmm5, %xmm7
4514 ; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
4515 ; SSE-NEXT: movaps 32(%rsi), %xmm1
4516 ; SSE-NEXT: movaps %xmm6, %xmm4
4517 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4518 ; SSE-NEXT: movaps 32(%r8), %xmm2
4519 ; SSE-NEXT: movaps 32(%r9), %xmm3
4520 ; SSE-NEXT: movaps %xmm3, %xmm8
4521 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4522 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4523 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4524 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm7[0]
4525 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4526 ; SSE-NEXT: movaps %xmm2, %xmm4
4527 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4528 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,3],xmm4[0,2]
4529 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4530 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
4531 ; SSE-NEXT: movaps %xmm0, %xmm1
4532 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm5[1]
4533 ; SSE-NEXT: movaps %xmm2, %xmm4
4534 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4535 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm6[2,3]
4536 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4537 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm1[2,0]
4538 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4539 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4540 ; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm0[2],xmm5[3],xmm0[3]
4541 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm2[0,2]
4542 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4543 ; SSE-NEXT: movaps 48(%rdx), %xmm6
4544 ; SSE-NEXT: movaps 48(%rcx), %xmm0
4545 ; SSE-NEXT: movaps %xmm6, %xmm5
4546 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4547 ; SSE-NEXT: movaps 48(%rdi), %xmm7
4548 ; SSE-NEXT: movaps 48(%rsi), %xmm1
4549 ; SSE-NEXT: movaps %xmm7, %xmm4
4550 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4551 ; SSE-NEXT: movaps 48(%r8), %xmm2
4552 ; SSE-NEXT: movaps 48(%r9), %xmm3
4553 ; SSE-NEXT: movaps %xmm3, %xmm8
4554 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4555 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4556 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4557 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4558 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4559 ; SSE-NEXT: movaps %xmm2, %xmm4
4560 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4561 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4562 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4563 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4564 ; SSE-NEXT: movaps %xmm0, %xmm1
4565 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4566 ; SSE-NEXT: movaps %xmm2, %xmm4
4567 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4568 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4569 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4570 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4571 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4572 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4573 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4574 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4575 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4576 ; SSE-NEXT: movaps 64(%rdx), %xmm6
4577 ; SSE-NEXT: movaps 64(%rcx), %xmm0
4578 ; SSE-NEXT: movaps %xmm6, %xmm5
4579 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4580 ; SSE-NEXT: movaps 64(%rdi), %xmm7
4581 ; SSE-NEXT: movaps 64(%rsi), %xmm1
4582 ; SSE-NEXT: movaps %xmm7, %xmm4
4583 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4584 ; SSE-NEXT: movaps 64(%r8), %xmm2
4585 ; SSE-NEXT: movaps 64(%r9), %xmm3
4586 ; SSE-NEXT: movaps %xmm3, %xmm8
4587 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4588 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4589 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4590 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4591 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4592 ; SSE-NEXT: movaps %xmm2, %xmm4
4593 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4594 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4595 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4596 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4597 ; SSE-NEXT: movaps %xmm0, %xmm1
4598 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4599 ; SSE-NEXT: movaps %xmm2, %xmm4
4600 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4601 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4602 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4603 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4604 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4605 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4606 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4607 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4608 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4609 ; SSE-NEXT: movaps 80(%rdx), %xmm6
4610 ; SSE-NEXT: movaps 80(%rcx), %xmm0
4611 ; SSE-NEXT: movaps %xmm6, %xmm5
4612 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4613 ; SSE-NEXT: movaps 80(%rdi), %xmm7
4614 ; SSE-NEXT: movaps 80(%rsi), %xmm1
4615 ; SSE-NEXT: movaps %xmm7, %xmm4
4616 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4617 ; SSE-NEXT: movaps 80(%r8), %xmm2
4618 ; SSE-NEXT: movaps 80(%r9), %xmm3
4619 ; SSE-NEXT: movaps %xmm3, %xmm8
4620 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4621 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4622 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4623 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4624 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4625 ; SSE-NEXT: movaps %xmm2, %xmm4
4626 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4627 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4628 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4629 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4630 ; SSE-NEXT: movaps %xmm0, %xmm1
4631 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4632 ; SSE-NEXT: movaps %xmm2, %xmm4
4633 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4634 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4635 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4636 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4637 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4638 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4639 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4640 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4641 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4642 ; SSE-NEXT: movaps 96(%rdx), %xmm6
4643 ; SSE-NEXT: movaps 96(%rcx), %xmm0
4644 ; SSE-NEXT: movaps %xmm6, %xmm5
4645 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4646 ; SSE-NEXT: movaps 96(%rdi), %xmm7
4647 ; SSE-NEXT: movaps 96(%rsi), %xmm1
4648 ; SSE-NEXT: movaps %xmm7, %xmm4
4649 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4650 ; SSE-NEXT: movaps 96(%r8), %xmm2
4651 ; SSE-NEXT: movaps 96(%r9), %xmm3
4652 ; SSE-NEXT: movaps %xmm3, %xmm8
4653 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4654 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4655 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4656 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4657 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4658 ; SSE-NEXT: movaps %xmm2, %xmm4
4659 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4660 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4661 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4662 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4663 ; SSE-NEXT: movaps %xmm0, %xmm1
4664 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4665 ; SSE-NEXT: movaps %xmm2, %xmm4
4666 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4667 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4668 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4669 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4670 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4671 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4672 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4673 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4674 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4675 ; SSE-NEXT: movaps 112(%rdx), %xmm6
4676 ; SSE-NEXT: movaps 112(%rcx), %xmm0
4677 ; SSE-NEXT: movaps %xmm6, %xmm5
4678 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4679 ; SSE-NEXT: movaps 112(%rdi), %xmm7
4680 ; SSE-NEXT: movaps 112(%rsi), %xmm1
4681 ; SSE-NEXT: movaps %xmm7, %xmm4
4682 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4683 ; SSE-NEXT: movaps 112(%r8), %xmm2
4684 ; SSE-NEXT: movaps 112(%r9), %xmm3
4685 ; SSE-NEXT: movaps %xmm3, %xmm8
4686 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4687 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4688 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4689 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4690 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4691 ; SSE-NEXT: movaps %xmm2, %xmm4
4692 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4693 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4694 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4695 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4696 ; SSE-NEXT: movaps %xmm0, %xmm1
4697 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4698 ; SSE-NEXT: movaps %xmm2, %xmm4
4699 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4700 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4701 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4702 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4703 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4704 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4705 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4706 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4707 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4708 ; SSE-NEXT: movaps 128(%rdx), %xmm6
4709 ; SSE-NEXT: movaps 128(%rcx), %xmm0
4710 ; SSE-NEXT: movaps %xmm6, %xmm5
4711 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4712 ; SSE-NEXT: movaps 128(%rdi), %xmm7
4713 ; SSE-NEXT: movaps 128(%rsi), %xmm1
4714 ; SSE-NEXT: movaps %xmm7, %xmm4
4715 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4716 ; SSE-NEXT: movaps 128(%r8), %xmm2
4717 ; SSE-NEXT: movaps 128(%r9), %xmm3
4718 ; SSE-NEXT: movaps %xmm3, %xmm8
4719 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4720 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4721 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4722 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4723 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4724 ; SSE-NEXT: movaps %xmm2, %xmm4
4725 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4726 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4727 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4728 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4729 ; SSE-NEXT: movaps %xmm0, %xmm1
4730 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4731 ; SSE-NEXT: movaps %xmm2, %xmm4
4732 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4733 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4734 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4735 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4736 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4737 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4738 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4739 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4740 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4741 ; SSE-NEXT: movaps 144(%rdx), %xmm6
4742 ; SSE-NEXT: movaps 144(%rcx), %xmm0
4743 ; SSE-NEXT: movaps %xmm6, %xmm5
4744 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4745 ; SSE-NEXT: movaps 144(%rdi), %xmm7
4746 ; SSE-NEXT: movaps 144(%rsi), %xmm1
4747 ; SSE-NEXT: movaps %xmm7, %xmm4
4748 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4749 ; SSE-NEXT: movaps 144(%r8), %xmm2
4750 ; SSE-NEXT: movaps 144(%r9), %xmm3
4751 ; SSE-NEXT: movaps %xmm3, %xmm8
4752 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4753 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4754 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4755 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4756 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4757 ; SSE-NEXT: movaps %xmm2, %xmm4
4758 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4759 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4760 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4761 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4762 ; SSE-NEXT: movaps %xmm0, %xmm1
4763 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4764 ; SSE-NEXT: movaps %xmm2, %xmm4
4765 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4766 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4767 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4768 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4769 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4770 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4771 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4772 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4773 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4774 ; SSE-NEXT: movaps 160(%rdx), %xmm6
4775 ; SSE-NEXT: movaps 160(%rcx), %xmm0
4776 ; SSE-NEXT: movaps %xmm6, %xmm5
4777 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4778 ; SSE-NEXT: movaps 160(%rdi), %xmm7
4779 ; SSE-NEXT: movaps 160(%rsi), %xmm1
4780 ; SSE-NEXT: movaps %xmm7, %xmm4
4781 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4782 ; SSE-NEXT: movaps 160(%r8), %xmm2
4783 ; SSE-NEXT: movaps 160(%r9), %xmm3
4784 ; SSE-NEXT: movaps %xmm3, %xmm8
4785 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4786 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4787 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4788 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4789 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4790 ; SSE-NEXT: movaps %xmm2, %xmm4
4791 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4792 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4793 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4794 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4795 ; SSE-NEXT: movaps %xmm0, %xmm1
4796 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4797 ; SSE-NEXT: movaps %xmm2, %xmm4
4798 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4799 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4800 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4801 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4802 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4803 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4804 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4805 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4806 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4807 ; SSE-NEXT: movaps 176(%rdx), %xmm6
4808 ; SSE-NEXT: movaps 176(%rcx), %xmm0
4809 ; SSE-NEXT: movaps %xmm6, %xmm5
4810 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4811 ; SSE-NEXT: movaps 176(%rdi), %xmm7
4812 ; SSE-NEXT: movaps 176(%rsi), %xmm1
4813 ; SSE-NEXT: movaps %xmm7, %xmm4
4814 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4815 ; SSE-NEXT: movaps 176(%r8), %xmm2
4816 ; SSE-NEXT: movaps 176(%r9), %xmm3
4817 ; SSE-NEXT: movaps %xmm3, %xmm8
4818 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4819 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4820 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4821 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4822 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4823 ; SSE-NEXT: movaps %xmm2, %xmm4
4824 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4825 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4826 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4827 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4828 ; SSE-NEXT: movaps %xmm0, %xmm1
4829 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4830 ; SSE-NEXT: movaps %xmm2, %xmm4
4831 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4832 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4833 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4834 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4835 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4836 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4837 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4838 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4839 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4840 ; SSE-NEXT: movaps 192(%rdx), %xmm6
4841 ; SSE-NEXT: movaps 192(%rcx), %xmm0
4842 ; SSE-NEXT: movaps %xmm6, %xmm5
4843 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4844 ; SSE-NEXT: movaps 192(%rdi), %xmm7
4845 ; SSE-NEXT: movaps 192(%rsi), %xmm1
4846 ; SSE-NEXT: movaps %xmm7, %xmm4
4847 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4848 ; SSE-NEXT: movaps 192(%r8), %xmm2
4849 ; SSE-NEXT: movaps 192(%r9), %xmm3
4850 ; SSE-NEXT: movaps %xmm3, %xmm8
4851 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4852 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4853 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4854 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4855 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4856 ; SSE-NEXT: movaps %xmm2, %xmm4
4857 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4858 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4859 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4860 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4861 ; SSE-NEXT: movaps %xmm0, %xmm1
4862 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4863 ; SSE-NEXT: movaps %xmm2, %xmm4
4864 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4865 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4866 ; SSE-NEXT: movaps %xmm4, (%rsp) # 16-byte Spill
4867 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4868 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4869 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4870 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4871 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4872 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4873 ; SSE-NEXT: movaps 208(%rdx), %xmm6
4874 ; SSE-NEXT: movaps 208(%rcx), %xmm0
4875 ; SSE-NEXT: movaps %xmm6, %xmm5
4876 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
4877 ; SSE-NEXT: movaps 208(%rdi), %xmm7
4878 ; SSE-NEXT: movaps 208(%rsi), %xmm1
4879 ; SSE-NEXT: movaps %xmm7, %xmm4
4880 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
4881 ; SSE-NEXT: movaps 208(%r8), %xmm2
4882 ; SSE-NEXT: movaps 208(%r9), %xmm3
4883 ; SSE-NEXT: movaps %xmm3, %xmm8
4884 ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
4885 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[2,3]
4886 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4887 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4888 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4889 ; SSE-NEXT: movaps %xmm2, %xmm4
4890 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4891 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm4[0,2]
4892 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4893 ; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
4894 ; SSE-NEXT: movaps %xmm0, %xmm1
4895 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
4896 ; SSE-NEXT: movaps %xmm2, %xmm4
4897 ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
4898 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[2,3]
4899 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4900 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
4901 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4902 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4903 ; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
4904 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm2[0,2]
4905 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4906 ; SSE-NEXT: movaps 224(%rdx), %xmm9
4907 ; SSE-NEXT: movaps 224(%rcx), %xmm0
4908 ; SSE-NEXT: movaps %xmm9, %xmm14
4909 ; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
4910 ; SSE-NEXT: movaps 224(%rdi), %xmm11
4911 ; SSE-NEXT: movaps 224(%rsi), %xmm1
4912 ; SSE-NEXT: movaps %xmm11, %xmm13
4913 ; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1]
4914 ; SSE-NEXT: movaps 224(%r8), %xmm2
4915 ; SSE-NEXT: movaps 224(%r9), %xmm3
4916 ; SSE-NEXT: movaps %xmm3, %xmm15
4917 ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm2[0]
4918 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[2,0],xmm13[2,3]
4919 ; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm14[0]
4920 ; SSE-NEXT: movaps %xmm2, %xmm4
4921 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
4922 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,3],xmm4[0,2]
4923 ; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm1[2],xmm11[3],xmm1[3]
4924 ; SSE-NEXT: movaps %xmm0, %xmm1
4925 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm9[1]
4926 ; SSE-NEXT: movaps %xmm2, %xmm8
4927 ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm3[1]
4928 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm11[2,3]
4929 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm1[2,0]
4930 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
4931 ; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm0[2],xmm9[3],xmm0[3]
4932 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,3],xmm2[0,2]
4933 ; SSE-NEXT: movaps 240(%rdx), %xmm3
4934 ; SSE-NEXT: movaps 240(%rcx), %xmm12
4935 ; SSE-NEXT: movaps %xmm3, %xmm5
4936 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
4937 ; SSE-NEXT: movaps 240(%rdi), %xmm2
4938 ; SSE-NEXT: movaps 240(%rsi), %xmm10
4939 ; SSE-NEXT: movaps %xmm2, %xmm4
4940 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
4941 ; SSE-NEXT: movaps 240(%r8), %xmm1
4942 ; SSE-NEXT: movaps 240(%r9), %xmm7
4943 ; SSE-NEXT: movaps %xmm7, %xmm6
4944 ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm1[0]
4945 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm4[2,3]
4946 ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
4947 ; SSE-NEXT: movaps %xmm1, %xmm0
4948 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm7[1,1]
4949 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm0[0,2]
4950 ; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm10[2],xmm2[3],xmm10[3]
4951 ; SSE-NEXT: movaps %xmm12, %xmm0
4952 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
4953 ; SSE-NEXT: movaps %xmm1, %xmm10
4954 ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm7[1]
4955 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm2[2,3]
4956 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
4957 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm7[3,3]
4958 ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm12[2],xmm3[3],xmm12[3]
4959 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm1[0,2]
4960 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
4961 ; SSE-NEXT: movaps %xmm3, 1520(%rax)
4962 ; SSE-NEXT: movaps %xmm10, 1504(%rax)
4963 ; SSE-NEXT: movaps %xmm2, 1488(%rax)
4964 ; SSE-NEXT: movaps %xmm5, 1472(%rax)
4965 ; SSE-NEXT: movaps %xmm6, 1456(%rax)
4966 ; SSE-NEXT: movaps %xmm4, 1440(%rax)
4967 ; SSE-NEXT: movaps %xmm9, 1424(%rax)
4968 ; SSE-NEXT: movaps %xmm8, 1408(%rax)
4969 ; SSE-NEXT: movaps %xmm11, 1392(%rax)
4970 ; SSE-NEXT: movaps %xmm14, 1376(%rax)
4971 ; SSE-NEXT: movaps %xmm15, 1360(%rax)
4972 ; SSE-NEXT: movaps %xmm13, 1344(%rax)
4973 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4974 ; SSE-NEXT: movaps %xmm0, 1328(%rax)
4975 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4976 ; SSE-NEXT: movaps %xmm0, 1312(%rax)
4977 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4978 ; SSE-NEXT: movaps %xmm0, 1296(%rax)
4979 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4980 ; SSE-NEXT: movaps %xmm0, 1280(%rax)
4981 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4982 ; SSE-NEXT: movaps %xmm0, 1264(%rax)
4983 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4984 ; SSE-NEXT: movaps %xmm0, 1248(%rax)
4985 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4986 ; SSE-NEXT: movaps %xmm0, 1232(%rax)
4987 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
4988 ; SSE-NEXT: movaps %xmm0, 1216(%rax)
4989 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4990 ; SSE-NEXT: movaps %xmm0, 1200(%rax)
4991 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4992 ; SSE-NEXT: movaps %xmm0, 1184(%rax)
4993 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4994 ; SSE-NEXT: movaps %xmm0, 1168(%rax)
4995 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4996 ; SSE-NEXT: movaps %xmm0, 1152(%rax)
4997 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4998 ; SSE-NEXT: movaps %xmm0, 1136(%rax)
4999 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5000 ; SSE-NEXT: movaps %xmm0, 1120(%rax)
5001 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5002 ; SSE-NEXT: movaps %xmm0, 1104(%rax)
5003 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5004 ; SSE-NEXT: movaps %xmm0, 1088(%rax)
5005 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5006 ; SSE-NEXT: movaps %xmm0, 1072(%rax)
5007 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5008 ; SSE-NEXT: movaps %xmm0, 1056(%rax)
5009 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5010 ; SSE-NEXT: movaps %xmm0, 1040(%rax)
5011 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5012 ; SSE-NEXT: movaps %xmm0, 1024(%rax)
5013 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5014 ; SSE-NEXT: movaps %xmm0, 1008(%rax)
5015 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5016 ; SSE-NEXT: movaps %xmm0, 992(%rax)
5017 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5018 ; SSE-NEXT: movaps %xmm0, 976(%rax)
5019 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5020 ; SSE-NEXT: movaps %xmm0, 960(%rax)
5021 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5022 ; SSE-NEXT: movaps %xmm0, 944(%rax)
5023 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5024 ; SSE-NEXT: movaps %xmm0, 928(%rax)
5025 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5026 ; SSE-NEXT: movaps %xmm0, 912(%rax)
5027 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5028 ; SSE-NEXT: movaps %xmm0, 896(%rax)
5029 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5030 ; SSE-NEXT: movaps %xmm0, 880(%rax)
5031 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5032 ; SSE-NEXT: movaps %xmm0, 864(%rax)
5033 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5034 ; SSE-NEXT: movaps %xmm0, 848(%rax)
5035 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5036 ; SSE-NEXT: movaps %xmm0, 832(%rax)
5037 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5038 ; SSE-NEXT: movaps %xmm0, 816(%rax)
5039 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5040 ; SSE-NEXT: movaps %xmm0, 800(%rax)
5041 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5042 ; SSE-NEXT: movaps %xmm0, 784(%rax)
5043 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5044 ; SSE-NEXT: movaps %xmm0, 768(%rax)
5045 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5046 ; SSE-NEXT: movaps %xmm0, 752(%rax)
5047 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5048 ; SSE-NEXT: movaps %xmm0, 736(%rax)
5049 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5050 ; SSE-NEXT: movaps %xmm0, 720(%rax)
5051 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5052 ; SSE-NEXT: movaps %xmm0, 704(%rax)
5053 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5054 ; SSE-NEXT: movaps %xmm0, 688(%rax)
5055 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5056 ; SSE-NEXT: movaps %xmm0, 672(%rax)
5057 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5058 ; SSE-NEXT: movaps %xmm0, 656(%rax)
5059 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5060 ; SSE-NEXT: movaps %xmm0, 640(%rax)
5061 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5062 ; SSE-NEXT: movaps %xmm0, 624(%rax)
5063 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5064 ; SSE-NEXT: movaps %xmm0, 608(%rax)
5065 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5066 ; SSE-NEXT: movaps %xmm0, 592(%rax)
5067 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5068 ; SSE-NEXT: movaps %xmm0, 576(%rax)
5069 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5070 ; SSE-NEXT: movaps %xmm0, 560(%rax)
5071 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5072 ; SSE-NEXT: movaps %xmm0, 544(%rax)
5073 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5074 ; SSE-NEXT: movaps %xmm0, 528(%rax)
5075 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5076 ; SSE-NEXT: movaps %xmm0, 512(%rax)
5077 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5078 ; SSE-NEXT: movaps %xmm0, 496(%rax)
5079 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5080 ; SSE-NEXT: movaps %xmm0, 480(%rax)
5081 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5082 ; SSE-NEXT: movaps %xmm0, 464(%rax)
5083 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5084 ; SSE-NEXT: movaps %xmm0, 448(%rax)
5085 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5086 ; SSE-NEXT: movaps %xmm0, 432(%rax)
5087 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5088 ; SSE-NEXT: movaps %xmm0, 416(%rax)
5089 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5090 ; SSE-NEXT: movaps %xmm0, 400(%rax)
5091 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5092 ; SSE-NEXT: movaps %xmm0, 384(%rax)
5093 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5094 ; SSE-NEXT: movaps %xmm0, 368(%rax)
5095 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5096 ; SSE-NEXT: movaps %xmm0, 352(%rax)
5097 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5098 ; SSE-NEXT: movaps %xmm0, 336(%rax)
5099 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5100 ; SSE-NEXT: movaps %xmm0, 320(%rax)
5101 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5102 ; SSE-NEXT: movaps %xmm0, 304(%rax)
5103 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5104 ; SSE-NEXT: movaps %xmm0, 288(%rax)
5105 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5106 ; SSE-NEXT: movaps %xmm0, 272(%rax)
5107 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5108 ; SSE-NEXT: movaps %xmm0, 256(%rax)
5109 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5110 ; SSE-NEXT: movaps %xmm0, 240(%rax)
5111 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5112 ; SSE-NEXT: movaps %xmm0, 224(%rax)
5113 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5114 ; SSE-NEXT: movaps %xmm0, 208(%rax)
5115 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5116 ; SSE-NEXT: movaps %xmm0, 192(%rax)
5117 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5118 ; SSE-NEXT: movaps %xmm0, 176(%rax)
5119 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5120 ; SSE-NEXT: movaps %xmm0, 160(%rax)
5121 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5122 ; SSE-NEXT: movaps %xmm0, 144(%rax)
5123 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5124 ; SSE-NEXT: movaps %xmm0, 128(%rax)
5125 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5126 ; SSE-NEXT: movaps %xmm0, 112(%rax)
5127 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5128 ; SSE-NEXT: movaps %xmm0, 96(%rax)
5129 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5130 ; SSE-NEXT: movaps %xmm0, 80(%rax)
5131 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5132 ; SSE-NEXT: movaps %xmm0, 64(%rax)
5133 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5134 ; SSE-NEXT: movaps %xmm0, 48(%rax)
5135 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5136 ; SSE-NEXT: movaps %xmm0, 32(%rax)
5137 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5138 ; SSE-NEXT: movaps %xmm0, 16(%rax)
5139 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5140 ; SSE-NEXT: movaps %xmm0, (%rax)
5141 ; SSE-NEXT: addq $1224, %rsp # imm = 0x4C8
5142 ; SSE-NEXT: retq
5143 ;
5144 ; AVX1-ONLY-LABEL: store_i32_stride6_vf64:
5145 ; AVX1-ONLY: # %bb.0:
5146 ; AVX1-ONLY-NEXT: subq $2504, %rsp # imm = 0x9C8
5147 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm8
5148 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5149 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %ymm9
5150 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5151 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %ymm4
5152 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5153 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %ymm5
5154 ; AVX1-ONLY-NEXT: vmovaps (%r8), %ymm6
5155 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5156 ; AVX1-ONLY-NEXT: vmovaps (%rcx), %xmm1
5157 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5158 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %xmm2
5159 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5160 ; AVX1-ONLY-NEXT: vmovaps (%rdx), %xmm0
5161 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5162 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %xmm3
5163 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5164 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
5165 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
5166 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5167 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm1
5168 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5169 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm7
5170 ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5171 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
5172 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5173 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5174 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5175 ; AVX1-ONLY-NEXT: vbroadcastss 4(%r8), %xmm1
5176 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5177 ; AVX1-ONLY-NEXT: vbroadcastss 4(%r9), %ymm1
5178 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
5179 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5180 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
5181 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5182 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
5183 ; AVX1-ONLY-NEXT: vmovaps %ymm5, %ymm15
5184 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5185 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
5186 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
5187 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5188 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
5189 ; AVX1-ONLY-NEXT: vbroadcastss 16(%r9), %ymm1
5190 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5191 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5192 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,2],xmm2[1,2]
5193 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
5194 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5195 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm1
5196 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5197 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm2
5198 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5199 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
5200 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5201 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5202 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5203 ; AVX1-ONLY-NEXT: vbroadcastss 36(%r8), %xmm1
5204 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5205 ; AVX1-ONLY-NEXT: vbroadcastss 36(%r9), %ymm1
5206 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
5207 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5208 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
5209 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5210 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %ymm0
5211 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5212 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
5213 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5214 ; AVX1-ONLY-NEXT: vmovaps 32(%rdx), %ymm2
5215 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5216 ; AVX1-ONLY-NEXT: vmovaps 32(%rcx), %ymm1
5217 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5218 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
5219 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
5220 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
5221 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5222 ; AVX1-ONLY-NEXT: vmovaps 32(%r8), %ymm1
5223 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5224 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5225 ; AVX1-ONLY-NEXT: vbroadcastss 48(%r9), %ymm1
5226 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5227 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5228 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %xmm1
5229 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5230 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %xmm0
5231 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5232 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
5233 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
5234 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5235 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %xmm1
5236 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5237 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm2
5238 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5239 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
5240 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5241 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5242 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5243 ; AVX1-ONLY-NEXT: vbroadcastss 68(%r8), %xmm1
5244 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5245 ; AVX1-ONLY-NEXT: vbroadcastss 68(%r9), %ymm1
5246 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
5247 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5248 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm1
5249 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5250 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %ymm0
5251 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5252 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
5253 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5254 ; AVX1-ONLY-NEXT: vmovaps 64(%rdx), %ymm2
5255 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5256 ; AVX1-ONLY-NEXT: vmovaps 64(%rcx), %ymm1
5257 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5258 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
5259 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
5260 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
5261 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5262 ; AVX1-ONLY-NEXT: vmovaps 64(%r8), %ymm1
5263 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5264 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5265 ; AVX1-ONLY-NEXT: vbroadcastss 80(%r9), %ymm1
5266 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5267 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5268 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %xmm1
5269 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5270 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %xmm0
5271 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5272 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
5273 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
5274 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5275 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %xmm1
5276 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5277 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm2
5278 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5279 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
5280 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5281 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5282 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5283 ; AVX1-ONLY-NEXT: vbroadcastss 100(%r8), %xmm1
5284 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5285 ; AVX1-ONLY-NEXT: vbroadcastss 100(%r9), %ymm1
5286 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
5287 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5288 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm1
5289 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5290 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %ymm0
5291 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5292 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
5293 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5294 ; AVX1-ONLY-NEXT: vmovaps 96(%rdx), %ymm2
5295 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5296 ; AVX1-ONLY-NEXT: vmovaps 96(%rcx), %ymm1
5297 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5298 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
5299 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
5300 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
5301 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5302 ; AVX1-ONLY-NEXT: vmovaps 96(%r8), %ymm1
5303 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5304 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5305 ; AVX1-ONLY-NEXT: vbroadcastss 112(%r9), %ymm1
5306 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5307 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5308 ; AVX1-ONLY-NEXT: vmovaps 128(%rcx), %xmm1
5309 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5310 ; AVX1-ONLY-NEXT: vmovaps 128(%rdx), %xmm0
5311 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5312 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
5313 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
5314 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5315 ; AVX1-ONLY-NEXT: vmovaps 128(%rsi), %xmm1
5316 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5317 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm2
5318 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5319 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
5320 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5321 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5322 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5323 ; AVX1-ONLY-NEXT: vbroadcastss 132(%r8), %xmm1
5324 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5325 ; AVX1-ONLY-NEXT: vbroadcastss 132(%r9), %ymm1
5326 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
5327 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5328 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm0
5329 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5330 ; AVX1-ONLY-NEXT: vmovaps 128(%rsi), %ymm11
5331 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm11[0],ymm0[1],ymm11[1],ymm0[4],ymm11[4],ymm0[5],ymm11[5]
5332 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5333 ; AVX1-ONLY-NEXT: vmovaps 128(%rdx), %ymm1
5334 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5335 ; AVX1-ONLY-NEXT: vmovaps 128(%rcx), %ymm2
5336 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5337 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
5338 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
5339 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
5340 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5341 ; AVX1-ONLY-NEXT: vmovaps 128(%r8), %ymm1
5342 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5343 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5344 ; AVX1-ONLY-NEXT: vbroadcastss 144(%r9), %ymm1
5345 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5346 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5347 ; AVX1-ONLY-NEXT: vmovaps 160(%rcx), %xmm0
5348 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5349 ; AVX1-ONLY-NEXT: vmovaps 160(%rdx), %xmm1
5350 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5351 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm1[1,2],xmm0[1,2]
5352 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
5353 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5354 ; AVX1-ONLY-NEXT: vmovaps 160(%rsi), %xmm7
5355 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm6
5356 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
5357 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5358 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5359 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5360 ; AVX1-ONLY-NEXT: vbroadcastss 164(%r8), %xmm1
5361 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5362 ; AVX1-ONLY-NEXT: vbroadcastss 164(%r9), %ymm1
5363 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
5364 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5365 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm1
5366 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5367 ; AVX1-ONLY-NEXT: vmovaps 160(%rsi), %ymm0
5368 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5369 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
5370 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5371 ; AVX1-ONLY-NEXT: vmovaps 160(%rdx), %ymm1
5372 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5373 ; AVX1-ONLY-NEXT: vmovaps 160(%rcx), %ymm8
5374 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
5375 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5376 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
5377 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
5378 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5379 ; AVX1-ONLY-NEXT: vmovaps 160(%r8), %ymm1
5380 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5381 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5382 ; AVX1-ONLY-NEXT: vbroadcastss 176(%r9), %ymm1
5383 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5384 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5385 ; AVX1-ONLY-NEXT: vmovaps 192(%rcx), %xmm1
5386 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5387 ; AVX1-ONLY-NEXT: vmovaps 192(%rdx), %xmm0
5388 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5389 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
5390 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
5391 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5392 ; AVX1-ONLY-NEXT: vmovaps 192(%rsi), %xmm3
5393 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm2
5394 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
5395 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5396 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5397 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5398 ; AVX1-ONLY-NEXT: vbroadcastss 196(%r8), %xmm1
5399 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5400 ; AVX1-ONLY-NEXT: vbroadcastss 196(%r9), %ymm1
5401 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
5402 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5403 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm1
5404 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5405 ; AVX1-ONLY-NEXT: vmovaps 192(%rsi), %ymm0
5406 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5407 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
5408 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5409 ; AVX1-ONLY-NEXT: vmovaps 192(%rdx), %ymm4
5410 ; AVX1-ONLY-NEXT: vmovaps 192(%rcx), %ymm1
5411 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5412 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
5413 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5414 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
5415 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
5416 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5417 ; AVX1-ONLY-NEXT: vmovaps 192(%r8), %ymm1
5418 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5419 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5420 ; AVX1-ONLY-NEXT: vbroadcastss 208(%r9), %ymm1
5421 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5422 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5423 ; AVX1-ONLY-NEXT: vmovaps 224(%rcx), %xmm1
5424 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5425 ; AVX1-ONLY-NEXT: vmovaps 224(%rdx), %xmm0
5426 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5427 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
5428 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
5429 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5430 ; AVX1-ONLY-NEXT: vmovaps 224(%rsi), %xmm1
5431 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5432 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm5
5433 ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5434 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
5435 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5436 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5437 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
5438 ; AVX1-ONLY-NEXT: vbroadcastss 228(%r8), %xmm1
5439 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5440 ; AVX1-ONLY-NEXT: vbroadcastss 228(%r9), %ymm1
5441 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
5442 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5443 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm9
5444 ; AVX1-ONLY-NEXT: vmovaps 224(%rsi), %ymm0
5445 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm14 = ymm9[0],ymm0[0],ymm9[1],ymm0[1],ymm9[4],ymm0[4],ymm9[5],ymm0[5]
5446 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm14[2,3,2,3]
5447 ; AVX1-ONLY-NEXT: vmovaps 224(%rdx), %ymm5
5448 ; AVX1-ONLY-NEXT: vmovaps 224(%rcx), %ymm1
5449 ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm1[0],ymm5[0],ymm1[2],ymm5[2]
5450 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5451 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5452 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
5453 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm13, %xmm13
5454 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3],ymm14[4,5,6,7]
5455 ; AVX1-ONLY-NEXT: vmovaps 224(%r8), %ymm10
5456 ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5457 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm10[4,5],ymm13[6,7]
5458 ; AVX1-ONLY-NEXT: vbroadcastss 240(%r9), %ymm14
5459 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1,2,3,4],ymm14[5],ymm13[6,7]
5460 ; AVX1-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5461 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
5462 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm14 # 32-byte Folded Reload
5463 ; AVX1-ONLY-NEXT: # ymm14 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
5464 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
5465 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm10[1,2],ymm15[1,2],ymm10[5,6],ymm15[5,6]
5466 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm13[2,3,2,3]
5467 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,2,1,3,4,6,5,7]
5468 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5],ymm13[6,7]
5469 ; AVX1-ONLY-NEXT: vbroadcastss 20(%r8), %xmm15
5470 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3],ymm13[4,5,6,7]
5471 ; AVX1-ONLY-NEXT: vbroadcastss 20(%r9), %ymm15
5472 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1,2],ymm15[3],ymm13[4,5,6,7]
5473 ; AVX1-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5474 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
5475 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm13 # 32-byte Folded Reload
5476 ; AVX1-ONLY-NEXT: # ymm13 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
5477 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5478 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
5479 ; AVX1-ONLY-NEXT: vshufps $153, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm15 # 32-byte Folded Reload
5480 ; AVX1-ONLY-NEXT: # ymm15 = ymm12[1,2],mem[1,2],ymm12[5,6],mem[5,6]
5481 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm15[2,3,2,3]
5482 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,2,1,3,4,6,5,7]
5483 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm13[4,5],ymm15[6,7]
5484 ; AVX1-ONLY-NEXT: vbroadcastss 52(%r8), %xmm12
5485 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm15[0,1],ymm12[2,3],ymm15[4,5,6,7]
5486 ; AVX1-ONLY-NEXT: vbroadcastss 52(%r9), %ymm15
5487 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3],ymm12[4,5,6,7]
5488 ; AVX1-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5489 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
5490 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm13 # 32-byte Folded Reload
5491 ; AVX1-ONLY-NEXT: # ymm13 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
5492 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5493 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
5494 ; AVX1-ONLY-NEXT: vshufps $153, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
5495 ; AVX1-ONLY-NEXT: # ymm12 = ymm12[1,2],mem[1,2],ymm12[5,6],mem[5,6]
5496 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm12[2,3,2,3]
5497 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,2,1,3,4,6,5,7]
5498 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7]
5499 ; AVX1-ONLY-NEXT: vbroadcastss 84(%r8), %xmm15
5500 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm15[2,3],ymm12[4,5,6,7]
5501 ; AVX1-ONLY-NEXT: vbroadcastss 84(%r9), %ymm15
5502 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3],ymm12[4,5,6,7]
5503 ; AVX1-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5504 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5505 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
5506 ; AVX1-ONLY-NEXT: # xmm12 = xmm12[0],mem[0],xmm12[1],mem[1]
5507 ; AVX1-ONLY-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
5508 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,0,0,0]
5509 ; AVX1-ONLY-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
5510 ; AVX1-ONLY-NEXT: # xmm13 = mem[0,0,0,0]
5511 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm13 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
5512 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm15
5513 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3],ymm15[4,5,6,7]
5514 ; AVX1-ONLY-NEXT: vinsertf128 $1, 96(%r8), %ymm12, %ymm12
5515 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5],ymm13[6,7]
5516 ; AVX1-ONLY-NEXT: vbroadcastss 96(%r9), %ymm13
5517 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4],ymm13[5],ymm12[6,7]
5518 ; AVX1-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5519 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
5520 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
5521 ; AVX1-ONLY-NEXT: # ymm12 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
5522 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
5523 ; AVX1-ONLY-NEXT: vshufps $153, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
5524 ; AVX1-ONLY-NEXT: # ymm13 = ymm13[1,2],mem[1,2],ymm13[5,6],mem[5,6]
5525 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm13[2,3,2,3]
5526 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,2,1,3,4,6,5,7]
5527 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm12[4,5],ymm13[6,7]
5528 ; AVX1-ONLY-NEXT: vbroadcastss 116(%r8), %xmm15
5529 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3],ymm13[4,5,6,7]
5530 ; AVX1-ONLY-NEXT: vbroadcastss 116(%r9), %ymm15
5531 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm15[3],ymm13[4,5,6,7]
5532 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5533 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
5534 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm13 = ymm13[2],ymm11[2],ymm13[3],ymm11[3],ymm13[6],ymm11[6],ymm13[7],ymm11[7]
5535 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5536 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
5537 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
5538 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[1,2],ymm15[1,2],ymm11[5,6],ymm15[5,6]
5539 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm11[2,3,2,3]
5540 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,2,1,3,4,6,5,7]
5541 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5],ymm11[6,7]
5542 ; AVX1-ONLY-NEXT: vbroadcastss 148(%r8), %xmm13
5543 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm13[2,3],ymm11[4,5,6,7]
5544 ; AVX1-ONLY-NEXT: vbroadcastss 148(%r9), %ymm13
5545 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm13[3],ymm11[4,5,6,7]
5546 ; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5547 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
5548 ; AVX1-ONLY-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
5549 ; AVX1-ONLY-NEXT: # xmm7 = mem[0,0,0,0]
5550 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
5551 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm13[0,0,0,0]
5552 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm7 = xmm11[0],xmm7[0],xmm11[1],xmm7[1]
5553 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm11
5554 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1],ymm7[2,3],ymm11[4,5,6,7]
5555 ; AVX1-ONLY-NEXT: vinsertf128 $1, 160(%r8), %ymm6, %ymm6
5556 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5],ymm7[6,7]
5557 ; AVX1-ONLY-NEXT: vbroadcastss 160(%r9), %ymm7
5558 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
5559 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5560 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5561 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
5562 ; AVX1-ONLY-NEXT: # ymm7 = ymm6[2],mem[2],ymm6[3],mem[3],ymm6[6],mem[6],ymm6[7],mem[7]
5563 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5564 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,2],ymm8[1,2],ymm6[5,6],ymm8[5,6]
5565 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm6[2,3,2,3]
5566 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,2,1,3,4,6,5,7]
5567 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
5568 ; AVX1-ONLY-NEXT: vbroadcastss 180(%r8), %xmm8
5569 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3],ymm6[4,5,6,7]
5570 ; AVX1-ONLY-NEXT: vbroadcastss 180(%r9), %ymm8
5571 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3],ymm6[4,5,6,7]
5572 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5573 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
5574 ; AVX1-ONLY-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5575 ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0,0,0]
5576 ; AVX1-ONLY-NEXT: vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
5577 ; AVX1-ONLY-NEXT: # xmm6 = mem[0,0,0,0]
5578 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
5579 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm6
5580 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3],ymm6[4,5,6,7]
5581 ; AVX1-ONLY-NEXT: vinsertf128 $1, 192(%r8), %ymm2, %ymm2
5582 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
5583 ; AVX1-ONLY-NEXT: vbroadcastss 192(%r9), %ymm3
5584 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
5585 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5586 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
5587 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
5588 ; AVX1-ONLY-NEXT: # ymm6 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
5589 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
5590 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[1,2],ymm2[1,2],ymm4[5,6],ymm2[5,6]
5591 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
5592 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,2,1,3,4,6,5,7]
5593 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5],ymm3[6,7]
5594 ; AVX1-ONLY-NEXT: vbroadcastss 212(%r8), %xmm4
5595 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
5596 ; AVX1-ONLY-NEXT: vbroadcastss 212(%r9), %ymm4
5597 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
5598 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5599 ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm3 = ymm9[2],ymm0[2],ymm9[3],ymm0[3],ymm9[6],ymm0[6],ymm9[7],ymm0[7]
5600 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm5[1,2],ymm1[1,2],ymm5[5,6],ymm1[5,6]
5601 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
5602 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
5603 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
5604 ; AVX1-ONLY-NEXT: vmovaps %ymm3, %ymm5
5605 ; AVX1-ONLY-NEXT: vbroadcastss 244(%r8), %xmm1
5606 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
5607 ; AVX1-ONLY-NEXT: vbroadcastss 244(%r9), %ymm1
5608 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
5609 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5610 ; AVX1-ONLY-NEXT: vbroadcastss (%rcx), %xmm0
5611 ; AVX1-ONLY-NEXT: vbroadcastss (%rdx), %xmm1
5612 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5613 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5614 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
5615 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
5616 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3
5617 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
5618 ; AVX1-ONLY-NEXT: vinsertf128 $1, (%r8), %ymm1, %ymm1
5619 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
5620 ; AVX1-ONLY-NEXT: vbroadcastss (%r9), %ymm1
5621 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5622 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5623 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5624 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
5625 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
5626 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
5627 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
5628 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
5629 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm1 = mem[2,1,3,3]
5630 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
5631 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5632 ; AVX1-ONLY-NEXT: vmovaps (%r9), %xmm1
5633 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm1[0,2,2,3]
5634 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
5635 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
5636 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5637 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5638 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm10[3,0],ymm0[7,4],ymm10[7,4]
5639 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
5640 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm14[2,3],ymm0[2,3]
5641 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
5642 ; AVX1-ONLY-NEXT: # ymm1 = mem[2,3,2,3]
5643 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
5644 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5645 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
5646 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
5647 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
5648 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5649 ; AVX1-ONLY-NEXT: vbroadcastss 32(%rcx), %xmm0
5650 ; AVX1-ONLY-NEXT: vbroadcastss 32(%rdx), %xmm1
5651 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5652 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5653 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
5654 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
5655 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3
5656 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
5657 ; AVX1-ONLY-NEXT: vinsertf128 $1, 32(%r8), %ymm1, %ymm1
5658 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
5659 ; AVX1-ONLY-NEXT: vbroadcastss 32(%r9), %ymm1
5660 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5661 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5662 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5663 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
5664 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
5665 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
5666 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
5667 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
5668 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm1 = mem[2,1,3,3]
5669 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
5670 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5671 ; AVX1-ONLY-NEXT: vmovaps 32(%r9), %xmm1
5672 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm1[0,2,2,3]
5673 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
5674 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
5675 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5676 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5677 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
5678 ; AVX1-ONLY-NEXT: # ymm0 = ymm0[3,0],mem[3,0],ymm0[7,4],mem[7,4]
5679 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
5680 ; AVX1-ONLY-NEXT: vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
5681 ; AVX1-ONLY-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
5682 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
5683 ; AVX1-ONLY-NEXT: # ymm1 = mem[2,3,2,3]
5684 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
5685 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
5686 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
5687 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
5688 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
5689 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5690 ; AVX1-ONLY-NEXT: vbroadcastss 64(%rcx), %xmm0
5691 ; AVX1-ONLY-NEXT: vbroadcastss 64(%rdx), %xmm1
5692 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
5693 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5694 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
5695 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
5696 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3
5697 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
5698 ; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%r8), %ymm1, %ymm1
5699 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
5700 ; AVX1-ONLY-NEXT: vbroadcastss 64(%r9), %ymm1
5701 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
5702 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5703 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5704 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
5705 ; AVX1-ONLY-NEXT: # xmm1 = xmm0[2],mem[2],xmm0[3],mem[3]
5706 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
5707 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5708 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
5709 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm3 = mem[2,1,3,3]
5710 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
5711 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5],ymm3[6,7]
5712 ; AVX1-ONLY-NEXT: vmovaps 64(%r9), %xmm3
5713 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm3[0,2,2,3]
5714 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
5715 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm3[1],ymm1[2,3,4,5,6],ymm3[7]
5716 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5717 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5718 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
5719 ; AVX1-ONLY-NEXT: # ymm3 = ymm0[3,0],mem[3,0],ymm0[7,4],mem[7,4]
5720 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
5721 ; AVX1-ONLY-NEXT: vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
5722 ; AVX1-ONLY-NEXT: # ymm3 = mem[2,3],ymm3[2,3]
5723 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
5724 ; AVX1-ONLY-NEXT: # ymm4 = mem[2,3,2,3]
5725 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,1,3,3,6,5,7,7]
5726 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3,4,5],ymm4[6,7]
5727 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = mem[2,3,2,3]
5728 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2,2,3,4,6,6,7]
5729 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm4[1],ymm3[2,3,4,5,6],ymm4[7]
5730 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5731 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
5732 ; AVX1-ONLY-NEXT: # xmm3 = xmm0[2],mem[2],xmm0[3],mem[3]
5733 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,3,2,3]
5734 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5735 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
5736 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm4 = mem[2,1,3,3]
5737 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
5738 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3,4,5],ymm4[6,7]
5739 ; AVX1-ONLY-NEXT: vmovaps 96(%r9), %xmm4
5740 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm8 = xmm4[0,2,2,3]
5741 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm8, %ymm4
5742 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4,5,6],ymm4[7]
5743 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5744 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
5745 ; AVX1-ONLY-NEXT: # ymm4 = ymm0[3,0],mem[3,0],ymm0[7,4],mem[7,4]
5746 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
5747 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm12[2,3],ymm4[2,3]
5748 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
5749 ; AVX1-ONLY-NEXT: # ymm8 = mem[2,3,2,3]
5750 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,1,3,3,6,5,7,7]
5751 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2,3,4,5],ymm8[6,7]
5752 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm8 = mem[2,3,2,3]
5753 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7]
5754 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3,4,5,6],ymm8[7]
5755 ; AVX1-ONLY-NEXT: vbroadcastss 128(%rcx), %xmm8
5756 ; AVX1-ONLY-NEXT: vbroadcastss 128(%rdx), %xmm9
5757 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
5758 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5759 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
5760 ; AVX1-ONLY-NEXT: # xmm9 = xmm0[0],mem[0],xmm0[1],mem[1]
5761 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm11
5762 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm8[2,3],ymm11[4,5,6,7]
5763 ; AVX1-ONLY-NEXT: vinsertf128 $1, 128(%r8), %ymm9, %ymm9
5764 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
5765 ; AVX1-ONLY-NEXT: vbroadcastss 128(%r9), %ymm9
5766 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
5767 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5768 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
5769 ; AVX1-ONLY-NEXT: # xmm9 = xmm0[2],mem[2],xmm0[3],mem[3]
5770 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
5771 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5772 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
5773 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm11 = mem[2,1,3,3]
5774 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm11, %ymm11, %ymm11
5775 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5],ymm11[6,7]
5776 ; AVX1-ONLY-NEXT: vmovaps 128(%r9), %xmm11
5777 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm11[0,2,2,3]
5778 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm11, %ymm12, %ymm11
5779 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm11[1],ymm9[2,3,4,5,6],ymm11[7]
5780 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm11 # 32-byte Folded Reload
5781 ; AVX1-ONLY-NEXT: # ymm11 = ymm15[3,0],mem[3,0],ymm15[7,4],mem[7,4]
5782 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,0,2,3,6,4,6,7]
5783 ; AVX1-ONLY-NEXT: vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
5784 ; AVX1-ONLY-NEXT: # ymm10 = mem[2,3],ymm11[2,3]
5785 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
5786 ; AVX1-ONLY-NEXT: # ymm11 = mem[2,3,2,3]
5787 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,1,3,3,6,5,7,7]
5788 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3,4,5],ymm11[6,7]
5789 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm11 = mem[2,3,2,3]
5790 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,2,2,3,4,6,6,7]
5791 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm11[1],ymm10[2,3,4,5,6],ymm11[7]
5792 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm11 # 16-byte Folded Reload
5793 ; AVX1-ONLY-NEXT: # xmm11 = xmm13[2],mem[2],xmm13[3],mem[3]
5794 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm11[2,3,2,3]
5795 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5796 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
5797 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm12 = mem[2,1,3,3]
5798 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm12, %ymm12
5799 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3,4,5],ymm12[6,7]
5800 ; AVX1-ONLY-NEXT: vmovaps 160(%r9), %xmm12
5801 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm12[0,2,2,3]
5802 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm13, %ymm12
5803 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm12[1],ymm11[2,3,4,5,6],ymm12[7]
5804 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5805 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
5806 ; AVX1-ONLY-NEXT: # ymm12 = ymm0[3,0],mem[3,0],ymm0[7,4],mem[7,4]
5807 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,0,2,3,6,4,6,7]
5808 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm7[2,3],ymm12[2,3]
5809 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
5810 ; AVX1-ONLY-NEXT: # ymm12 = mem[2,3,2,3]
5811 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,1,3,3,6,5,7,7]
5812 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm12[0,1],ymm7[2,3,4,5],ymm12[6,7]
5813 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = mem[2,3,2,3]
5814 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,2,2,3,4,6,6,7]
5815 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm12[1],ymm7[2,3,4,5,6],ymm12[7]
5816 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5817 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
5818 ; AVX1-ONLY-NEXT: # xmm12 = xmm0[2],mem[2],xmm0[3],mem[3]
5819 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm12[2,3,2,3]
5820 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5821 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
5822 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm13 = mem[2,1,3,3]
5823 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm13, %ymm13
5824 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3,4,5],ymm13[6,7]
5825 ; AVX1-ONLY-NEXT: vmovaps 192(%r9), %xmm13
5826 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm14 = xmm13[0,2,2,3]
5827 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm14, %ymm13
5828 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4,5,6],ymm13[7]
5829 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm13 # 32-byte Folded Reload
5830 ; AVX1-ONLY-NEXT: # ymm13 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
5831 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm13[2,0,2,3,6,4,6,7]
5832 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm6[2,3],ymm13[2,3]
5833 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
5834 ; AVX1-ONLY-NEXT: # ymm13 = mem[2,3,2,3]
5835 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm13[2,1,3,3,6,5,7,7]
5836 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm13[0,1],ymm2[2,3,4,5],ymm13[6,7]
5837 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm13 = mem[2,3,2,3]
5838 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,2,2,3,4,6,6,7]
5839 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm13[1],ymm2[2,3,4,5,6],ymm13[7]
5840 ; AVX1-ONLY-NEXT: vbroadcastss 224(%rcx), %xmm13
5841 ; AVX1-ONLY-NEXT: vbroadcastss 224(%rdx), %xmm14
5842 ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
5843 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5844 ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm14 # 16-byte Folded Reload
5845 ; AVX1-ONLY-NEXT: # xmm14 = xmm0[0],mem[0],xmm0[1],mem[1]
5846 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm15
5847 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3],ymm15[4,5,6,7]
5848 ; AVX1-ONLY-NEXT: vinsertf128 $1, 224(%r8), %ymm14, %ymm14
5849 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3],ymm14[4,5],ymm13[6,7]
5850 ; AVX1-ONLY-NEXT: vbroadcastss 224(%r9), %ymm14
5851 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4],ymm14[5],ymm13[6,7]
5852 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5853 ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm14 # 16-byte Folded Reload
5854 ; AVX1-ONLY-NEXT: # xmm14 = xmm0[2],mem[2],xmm0[3],mem[3]
5855 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm14 = xmm14[2,3,2,3]
5856 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5857 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
5858 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} xmm15 = mem[2,1,3,3]
5859 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm15, %ymm15, %ymm15
5860 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5],ymm15[6,7]
5861 ; AVX1-ONLY-NEXT: vmovaps 224(%r9), %xmm15
5862 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm15[0,2,2,3]
5863 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm0
5864 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0],ymm0[1],ymm14[2,3,4,5,6],ymm0[7]
5865 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
5866 ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm14 # 32-byte Folded Reload
5867 ; AVX1-ONLY-NEXT: # ymm14 = ymm6[3,0],mem[3,0],ymm6[7,4],mem[7,4]
5868 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
5869 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3],ymm14[2,3]
5870 ; AVX1-ONLY-NEXT: vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
5871 ; AVX1-ONLY-NEXT: # ymm14 = mem[2,3,2,3]
5872 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,1,3,3,6,5,7,7]
5873 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1],ymm5[2,3,4,5],ymm14[6,7]
5874 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm14 = mem[2,3,2,3]
5875 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,2,2,3,4,6,6,7]
5876 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm14[1],ymm5[2,3,4,5,6],ymm14[7]
5877 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
5878 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 1504(%rax)
5879 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1408(%rax)
5880 ; AVX1-ONLY-NEXT: vmovaps %ymm13, 1344(%rax)
5881 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 1312(%rax)
5882 ; AVX1-ONLY-NEXT: vmovaps %ymm12, 1216(%rax)
5883 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 1120(%rax)
5884 ; AVX1-ONLY-NEXT: vmovaps %ymm11, 1024(%rax)
5885 ; AVX1-ONLY-NEXT: vmovaps %ymm10, 928(%rax)
5886 ; AVX1-ONLY-NEXT: vmovaps %ymm9, 832(%rax)
5887 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 768(%rax)
5888 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 736(%rax)
5889 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 640(%rax)
5890 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 544(%rax)
5891 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5892 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rax)
5893 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5894 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rax)
5895 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5896 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rax)
5897 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5898 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rax)
5899 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5900 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rax)
5901 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5902 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rax)
5903 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5904 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rax)
5905 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5906 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax)
5907 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5908 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1472(%rax)
5909 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5910 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1280(%rax)
5911 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5912 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1152(%rax)
5913 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5914 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1088(%rax)
5915 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5916 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 960(%rax)
5917 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5918 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 896(%rax)
5919 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5920 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 704(%rax)
5921 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5922 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 576(%rax)
5923 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5924 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 512(%rax)
5925 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5926 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 320(%rax)
5927 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5928 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax)
5929 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5930 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1440(%rax)
5931 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5932 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1376(%rax)
5933 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5934 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1248(%rax)
5935 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5936 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1184(%rax)
5937 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5938 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 1056(%rax)
5939 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5940 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 992(%rax)
5941 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5942 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 864(%rax)
5943 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5944 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 800(%rax)
5945 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5946 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 672(%rax)
5947 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5948 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 608(%rax)
5949 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5950 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rax)
5951 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5952 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rax)
5953 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5954 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rax)
5955 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5956 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax)
5957 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5958 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax)
5959 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
5960 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax)
5961 ; AVX1-ONLY-NEXT: addq $2504, %rsp # imm = 0x9C8
5962 ; AVX1-ONLY-NEXT: vzeroupper
5963 ; AVX1-ONLY-NEXT: retq
5964 ;
5965 ; AVX2-SLOW-LABEL: store_i32_stride6_vf64:
5966 ; AVX2-SLOW: # %bb.0:
5967 ; AVX2-SLOW-NEXT: subq $2504, %rsp # imm = 0x9C8
5968 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm0
5969 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5970 ; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %xmm1
5971 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5972 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm3
5973 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm10
5974 ; AVX2-SLOW-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5975 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
5976 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5977 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm4
5978 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm0
5979 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5980 ; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %xmm8
5981 ; AVX2-SLOW-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5982 ; AVX2-SLOW-NEXT: vmovdqa 64(%rcx), %xmm7
5983 ; AVX2-SLOW-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5984 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,2,2,3]
5985 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm0
5986 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5987 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %xmm9
5988 ; AVX2-SLOW-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5989 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,2,2,3]
5990 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
5991 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
5992 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5],ymm5[6,7]
5993 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm2
5994 ; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %xmm6
5995 ; AVX2-SLOW-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5996 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero
5997 ; AVX2-SLOW-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5998 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
5999 ; AVX2-SLOW-NEXT: vpbroadcastd 4(%r9), %ymm5
6000 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
6001 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6002 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm10[2],xmm1[2],xmm10[3],xmm1[3]
6003 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6004 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm8[1,2,2,3]
6005 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm9[1,2,2,3]
6006 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
6007 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm5
6008 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
6009 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
6010 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero
6011 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
6012 ; AVX2-SLOW-NEXT: vpbroadcastd 36(%r9), %ymm5
6013 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
6014 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6015 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdx), %xmm0
6016 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6017 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[1,2,2,3]
6018 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,2,2,3]
6019 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
6020 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm4[0,1,2,1]
6021 ; AVX2-SLOW-NEXT: vmovdqa 64(%rsi), %xmm4
6022 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %xmm5
6023 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
6024 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6025 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm7
6026 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
6027 ; AVX2-SLOW-NEXT: vmovdqa 64(%r8), %xmm0
6028 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6029 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero
6030 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
6031 ; AVX2-SLOW-NEXT: vpbroadcastd 68(%r9), %ymm7
6032 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm7[3],ymm6[4,5,6,7]
6033 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6034 ; AVX2-SLOW-NEXT: vmovdqa 96(%rcx), %xmm0
6035 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6036 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,2,2,3]
6037 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdx), %xmm0
6038 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6039 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,2,2,3]
6040 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
6041 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[0,1,2,1]
6042 ; AVX2-SLOW-NEXT: vmovdqa 96(%rsi), %xmm6
6043 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %xmm7
6044 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
6045 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6046 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm9
6047 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
6048 ; AVX2-SLOW-NEXT: vmovdqa 96(%r8), %xmm0
6049 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6050 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
6051 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
6052 ; AVX2-SLOW-NEXT: vpbroadcastd 100(%r9), %ymm9
6053 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm9[3],ymm8[4,5,6,7]
6054 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6055 ; AVX2-SLOW-NEXT: vmovdqa 128(%rcx), %xmm0
6056 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6057 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[1,2,2,3]
6058 ; AVX2-SLOW-NEXT: vmovdqa 128(%rdx), %xmm0
6059 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6060 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[1,2,2,3]
6061 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
6062 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm8[0,1,2,1]
6063 ; AVX2-SLOW-NEXT: vmovdqa 128(%rsi), %xmm8
6064 ; AVX2-SLOW-NEXT: vmovdqa 128(%rdi), %xmm9
6065 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
6066 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6067 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm11
6068 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
6069 ; AVX2-SLOW-NEXT: vmovdqa 128(%r8), %xmm0
6070 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6071 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm11 = xmm0[0],zero,xmm0[1],zero
6072 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
6073 ; AVX2-SLOW-NEXT: vpbroadcastd 132(%r9), %ymm11
6074 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6,7]
6075 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6076 ; AVX2-SLOW-NEXT: vmovdqa 160(%rcx), %xmm0
6077 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6078 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm0[1,2,2,3]
6079 ; AVX2-SLOW-NEXT: vmovdqa 160(%rdx), %xmm0
6080 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6081 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm0[1,2,2,3]
6082 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
6083 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm12 = ymm10[0,1,2,1]
6084 ; AVX2-SLOW-NEXT: vmovdqa 160(%rsi), %xmm10
6085 ; AVX2-SLOW-NEXT: vmovdqa 160(%rdi), %xmm11
6086 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm11[2],xmm10[2],xmm11[3],xmm10[3]
6087 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6088 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm13
6089 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7]
6090 ; AVX2-SLOW-NEXT: vmovdqa 160(%r8), %xmm0
6091 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6092 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm13 = xmm0[0],zero,xmm0[1],zero
6093 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5,6,7]
6094 ; AVX2-SLOW-NEXT: vpbroadcastd 164(%r9), %ymm13
6095 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm13[3],ymm12[4,5,6,7]
6096 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6097 ; AVX2-SLOW-NEXT: vmovdqa 192(%rcx), %xmm0
6098 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6099 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm0[1,2,2,3]
6100 ; AVX2-SLOW-NEXT: vmovdqa 192(%rdx), %xmm0
6101 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6102 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm0[1,2,2,3]
6103 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
6104 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm12[0,1,2,1]
6105 ; AVX2-SLOW-NEXT: vmovdqa 192(%rsi), %xmm12
6106 ; AVX2-SLOW-NEXT: vmovdqa 192(%rdi), %xmm13
6107 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
6108 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6109 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm15
6110 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
6111 ; AVX2-SLOW-NEXT: vmovdqa 192(%r8), %xmm0
6112 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6113 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = xmm0[0],zero,xmm0[1],zero
6114 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
6115 ; AVX2-SLOW-NEXT: vpbroadcastd 196(%r9), %ymm15
6116 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
6117 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6118 ; AVX2-SLOW-NEXT: vmovdqa 224(%rcx), %xmm0
6119 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6120 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[1,2,2,3]
6121 ; AVX2-SLOW-NEXT: vmovdqa 224(%rdx), %xmm0
6122 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6123 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm15 = xmm0[1,2,2,3]
6124 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
6125 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm14[0,1,2,1]
6126 ; AVX2-SLOW-NEXT: vmovdqa 224(%rsi), %xmm14
6127 ; AVX2-SLOW-NEXT: vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6128 ; AVX2-SLOW-NEXT: vmovdqa 224(%rdi), %xmm0
6129 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6130 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm14 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
6131 ; AVX2-SLOW-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6132 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
6133 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm14[4,5],ymm1[6,7]
6134 ; AVX2-SLOW-NEXT: vmovdqa 224(%r8), %xmm14
6135 ; AVX2-SLOW-NEXT: vmovdqa %xmm14, (%rsp) # 16-byte Spill
6136 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm14 = xmm14[0],zero,xmm14[1],zero
6137 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3],ymm0[4,5,6,7]
6138 ; AVX2-SLOW-NEXT: vpbroadcastd 228(%r9), %ymm14
6139 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3],ymm0[4,5,6,7]
6140 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6141 ; AVX2-SLOW-NEXT: vpbroadcastd (%rcx), %xmm0
6142 ; AVX2-SLOW-NEXT: vpbroadcastd (%rdx), %xmm14
6143 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
6144 ; AVX2-SLOW-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload
6145 ; AVX2-SLOW-NEXT: # xmm1 = xmm3[0],mem[0],xmm3[1],mem[1]
6146 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
6147 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
6148 ; AVX2-SLOW-NEXT: vpbroadcastq %xmm2, %ymm1
6149 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
6150 ; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm1
6151 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6152 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm1, %ymm1
6153 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
6154 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6155 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm0
6156 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6157 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm1
6158 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6159 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[0,1,2,2,4,5,6,6]
6160 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm14 = ymm0[1,1,2,3,5,5,6,7]
6161 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0],ymm2[1],ymm14[2],ymm2[3],ymm14[4],ymm2[5],ymm14[6],ymm2[7]
6162 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
6163 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
6164 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6165 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm1
6166 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6167 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
6168 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6169 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
6170 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm14 = mem[0],zero,mem[1],zero
6171 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm14[2,3],ymm2[4,5,6,7]
6172 ; AVX2-SLOW-NEXT: vpbroadcastd 20(%r9), %ymm14
6173 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm14[3],ymm2[4,5,6,7]
6174 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6175 ; AVX2-SLOW-NEXT: vbroadcastss 32(%rcx), %xmm2
6176 ; AVX2-SLOW-NEXT: vbroadcastss 32(%rdx), %xmm14
6177 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm2 = xmm14[0],xmm2[0],xmm14[1],xmm2[1]
6178 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6179 ; AVX2-SLOW-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
6180 ; AVX2-SLOW-NEXT: # xmm3 = xmm0[0],mem[0],xmm0[1],mem[1]
6181 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,2,1]
6182 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
6183 ; AVX2-SLOW-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 16-byte Folded Reload
6184 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
6185 ; AVX2-SLOW-NEXT: vmovaps 32(%r9), %xmm0
6186 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6187 ; AVX2-SLOW-NEXT: vbroadcastss %xmm0, %ymm3
6188 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
6189 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6190 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %ymm0
6191 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6192 ; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %ymm3
6193 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm14 = ymm3[0,1,2,2,4,5,6,6]
6194 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm15 = ymm0[1,1,2,3,5,5,6,7]
6195 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
6196 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
6197 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm0
6198 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6199 ; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %ymm1
6200 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6201 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
6202 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6203 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
6204 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
6205 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
6206 ; AVX2-SLOW-NEXT: vpbroadcastd 52(%r9), %ymm15
6207 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
6208 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6209 ; AVX2-SLOW-NEXT: vpbroadcastd 64(%rcx), %xmm14
6210 ; AVX2-SLOW-NEXT: vpbroadcastd 64(%rdx), %xmm15
6211 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
6212 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
6213 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
6214 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm14[2,3],ymm4[4,5,6,7]
6215 ; AVX2-SLOW-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 16-byte Folded Reload
6216 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
6217 ; AVX2-SLOW-NEXT: vmovdqa 64(%r9), %xmm0
6218 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6219 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %ymm5
6220 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm5[5],ymm4[6,7]
6221 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6222 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdx), %ymm5
6223 ; AVX2-SLOW-NEXT: vmovdqa 64(%rcx), %ymm4
6224 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm14 = ymm4[0,1,2,2,4,5,6,6]
6225 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm15 = ymm5[1,1,2,3,5,5,6,7]
6226 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
6227 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
6228 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm0
6229 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6230 ; AVX2-SLOW-NEXT: vmovdqa 64(%rsi), %ymm1
6231 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6232 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
6233 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6234 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
6235 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
6236 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
6237 ; AVX2-SLOW-NEXT: vpbroadcastd 84(%r9), %ymm15
6238 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
6239 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6240 ; AVX2-SLOW-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
6241 ; AVX2-SLOW-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
6242 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
6243 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
6244 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
6245 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm14[2,3],ymm6[4,5,6,7]
6246 ; AVX2-SLOW-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
6247 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
6248 ; AVX2-SLOW-NEXT: vmovdqa 96(%r9), %xmm0
6249 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6250 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %ymm7
6251 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
6252 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6253 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdx), %ymm7
6254 ; AVX2-SLOW-NEXT: vmovdqa 96(%rcx), %ymm6
6255 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm14 = ymm6[0,1,2,2,4,5,6,6]
6256 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm15 = ymm7[1,1,2,3,5,5,6,7]
6257 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
6258 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
6259 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm0
6260 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6261 ; AVX2-SLOW-NEXT: vmovdqa 96(%rsi), %ymm1
6262 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6263 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
6264 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6265 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
6266 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
6267 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
6268 ; AVX2-SLOW-NEXT: vpbroadcastd 116(%r9), %ymm15
6269 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
6270 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6271 ; AVX2-SLOW-NEXT: vpbroadcastd 128(%rcx), %xmm14
6272 ; AVX2-SLOW-NEXT: vpbroadcastd 128(%rdx), %xmm15
6273 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
6274 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
6275 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1]
6276 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm14[2,3],ymm8[4,5,6,7]
6277 ; AVX2-SLOW-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 16-byte Folded Reload
6278 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
6279 ; AVX2-SLOW-NEXT: vmovdqa 128(%r9), %xmm0
6280 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6281 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %ymm9
6282 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
6283 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6284 ; AVX2-SLOW-NEXT: vmovdqa 128(%rdx), %ymm9
6285 ; AVX2-SLOW-NEXT: vmovdqa 128(%rcx), %ymm8
6286 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm14 = ymm8[0,1,2,2,4,5,6,6]
6287 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm15 = ymm9[1,1,2,3,5,5,6,7]
6288 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
6289 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
6290 ; AVX2-SLOW-NEXT: vmovdqa 128(%rdi), %ymm0
6291 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6292 ; AVX2-SLOW-NEXT: vmovdqa 128(%rsi), %ymm1
6293 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6294 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
6295 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6296 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
6297 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
6298 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
6299 ; AVX2-SLOW-NEXT: vpbroadcastd 148(%r9), %ymm15
6300 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
6301 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6302 ; AVX2-SLOW-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
6303 ; AVX2-SLOW-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
6304 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
6305 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
6306 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,1,2,1]
6307 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm14[2,3],ymm10[4,5,6,7]
6308 ; AVX2-SLOW-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 16-byte Folded Reload
6309 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
6310 ; AVX2-SLOW-NEXT: vmovdqa 160(%r9), %xmm0
6311 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6312 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %ymm11
6313 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm11[5],ymm10[6,7]
6314 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6315 ; AVX2-SLOW-NEXT: vmovdqa 160(%rdx), %ymm11
6316 ; AVX2-SLOW-NEXT: vmovdqa 160(%rcx), %ymm10
6317 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm14 = ymm10[0,1,2,2,4,5,6,6]
6318 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm15 = ymm11[1,1,2,3,5,5,6,7]
6319 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
6320 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
6321 ; AVX2-SLOW-NEXT: vmovdqa 160(%rdi), %ymm0
6322 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6323 ; AVX2-SLOW-NEXT: vmovdqa 160(%rsi), %ymm1
6324 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6325 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
6326 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6327 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
6328 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
6329 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
6330 ; AVX2-SLOW-NEXT: vpbroadcastd 180(%r9), %ymm15
6331 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
6332 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6333 ; AVX2-SLOW-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
6334 ; AVX2-SLOW-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
6335 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
6336 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
6337 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,2,1]
6338 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm14[2,3],ymm12[4,5,6,7]
6339 ; AVX2-SLOW-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 16-byte Folded Reload
6340 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7]
6341 ; AVX2-SLOW-NEXT: vmovdqa 192(%r9), %xmm0
6342 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6343 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %ymm13
6344 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3,4],ymm13[5],ymm12[6,7]
6345 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6346 ; AVX2-SLOW-NEXT: vmovdqa 192(%rdx), %ymm13
6347 ; AVX2-SLOW-NEXT: vmovdqa 192(%rcx), %ymm12
6348 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm14 = ymm12[0,1,2,2,4,5,6,6]
6349 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm15 = ymm13[1,1,2,3,5,5,6,7]
6350 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
6351 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
6352 ; AVX2-SLOW-NEXT: vmovdqa 192(%rdi), %ymm0
6353 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6354 ; AVX2-SLOW-NEXT: vmovdqa 192(%rsi), %ymm1
6355 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6356 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
6357 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6358 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
6359 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
6360 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
6361 ; AVX2-SLOW-NEXT: vpbroadcastd 212(%r9), %ymm15
6362 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
6363 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6364 ; AVX2-SLOW-NEXT: vbroadcastss 224(%rcx), %xmm14
6365 ; AVX2-SLOW-NEXT: vbroadcastss 224(%rdx), %xmm15
6366 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
6367 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6368 ; AVX2-SLOW-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm15 # 16-byte Folded Reload
6369 ; AVX2-SLOW-NEXT: # xmm15 = xmm0[0],mem[0],xmm0[1],mem[1]
6370 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
6371 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
6372 ; AVX2-SLOW-NEXT: vbroadcastsd (%rsp), %ymm15 # 16-byte Folded Reload
6373 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
6374 ; AVX2-SLOW-NEXT: vmovaps 224(%r9), %xmm0
6375 ; AVX2-SLOW-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6376 ; AVX2-SLOW-NEXT: vbroadcastss %xmm0, %ymm15
6377 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3,4],ymm15[5],ymm14[6,7]
6378 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6379 ; AVX2-SLOW-NEXT: vmovdqa 224(%rdx), %ymm14
6380 ; AVX2-SLOW-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6381 ; AVX2-SLOW-NEXT: vmovdqa 224(%rcx), %ymm0
6382 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6383 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
6384 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm15 = ymm14[1,1,2,3,5,5,6,7]
6385 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0],ymm0[1],ymm15[2],ymm0[3],ymm15[4],ymm0[5],ymm15[6],ymm0[7]
6386 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,1,2,3]
6387 ; AVX2-SLOW-NEXT: vmovdqa 224(%rdi), %ymm14
6388 ; AVX2-SLOW-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6389 ; AVX2-SLOW-NEXT: vmovdqa 224(%rsi), %ymm0
6390 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6391 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm14 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
6392 ; AVX2-SLOW-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6393 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm14[4,5],ymm1[6,7]
6394 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
6395 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
6396 ; AVX2-SLOW-NEXT: vpbroadcastd 244(%r9), %ymm15
6397 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3],ymm0[4,5,6,7]
6398 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6399 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6400 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
6401 ; AVX2-SLOW-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
6402 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
6403 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
6404 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm14, %ymm0
6405 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
6406 ; AVX2-SLOW-NEXT: # xmm15 = mem[2,2,3,3]
6407 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
6408 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3,4,5],ymm15[6,7]
6409 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
6410 ; AVX2-SLOW-NEXT: # xmm15 = mem[2,2,3,3]
6411 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
6412 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3,4,5,6],ymm15[7]
6413 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6414 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6415 ; AVX2-SLOW-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6416 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
6417 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
6418 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6419 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm15 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
6420 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,2]
6421 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,2,3]
6422 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
6423 ; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm15
6424 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5],ymm0[6,7]
6425 ; AVX2-SLOW-NEXT: vbroadcastss 16(%r9), %ymm14
6426 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5],ymm0[6,7]
6427 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6428 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
6429 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
6430 ; AVX2-SLOW-NEXT: vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6431 ; AVX2-SLOW-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
6432 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm15[2,1,3,3,6,5,7,7]
6433 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
6434 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6435 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
6436 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
6437 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6438 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6439 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6440 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
6441 ; AVX2-SLOW-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
6442 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
6443 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6444 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
6445 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6446 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6447 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6448 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6449 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6450 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6451 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6452 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6453 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6454 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6455 ; AVX2-SLOW-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6456 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
6457 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
6458 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
6459 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
6460 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
6461 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
6462 ; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %ymm1
6463 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
6464 ; AVX2-SLOW-NEXT: vpbroadcastd 48(%r9), %ymm14
6465 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5],ymm0[6,7]
6466 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6467 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
6468 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
6469 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6470 ; AVX2-SLOW-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
6471 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
6472 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
6473 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6474 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
6475 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
6476 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6477 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6478 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6479 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
6480 ; AVX2-SLOW-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
6481 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
6482 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6483 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
6484 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6485 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6486 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6487 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6488 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6489 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6490 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6491 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6492 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6493 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6494 ; AVX2-SLOW-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6495 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
6496 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[4],ymm4[4],ymm5[5],ymm4[5]
6497 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
6498 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
6499 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
6500 ; AVX2-SLOW-NEXT: vmovdqa 64(%r8), %ymm1
6501 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
6502 ; AVX2-SLOW-NEXT: vpbroadcastd 80(%r9), %ymm2
6503 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
6504 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6505 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[6],ymm4[6],ymm5[7],ymm4[7]
6506 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
6507 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6508 ; AVX2-SLOW-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
6509 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
6510 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
6511 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6512 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
6513 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
6514 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6515 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6516 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6517 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
6518 ; AVX2-SLOW-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
6519 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
6520 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6521 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
6522 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6523 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6524 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6525 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6526 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6527 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6528 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6529 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6530 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6531 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6532 ; AVX2-SLOW-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6533 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
6534 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[4],ymm6[4],ymm7[5],ymm6[5]
6535 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
6536 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
6537 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
6538 ; AVX2-SLOW-NEXT: vmovdqa 96(%r8), %ymm1
6539 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
6540 ; AVX2-SLOW-NEXT: vpbroadcastd 112(%r9), %ymm2
6541 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
6542 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6543 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[6],ymm6[6],ymm7[7],ymm6[7]
6544 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
6545 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6546 ; AVX2-SLOW-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
6547 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
6548 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
6549 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6550 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
6551 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
6552 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6553 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6554 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
6555 ; AVX2-SLOW-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
6556 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
6557 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6558 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
6559 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6560 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6561 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6562 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6563 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6564 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6565 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6566 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6567 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6568 ; AVX2-SLOW-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6569 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
6570 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[4],ymm8[4],ymm9[5],ymm8[5]
6571 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
6572 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
6573 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
6574 ; AVX2-SLOW-NEXT: vmovdqa 128(%r8), %ymm1
6575 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
6576 ; AVX2-SLOW-NEXT: vpbroadcastd 144(%r9), %ymm2
6577 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
6578 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[6],ymm8[6],ymm9[7],ymm8[7]
6579 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
6580 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6581 ; AVX2-SLOW-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
6582 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
6583 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
6584 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6585 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
6586 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
6587 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6588 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6589 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
6590 ; AVX2-SLOW-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
6591 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
6592 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6593 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
6594 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6595 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6596 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6597 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
6598 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6599 ; AVX2-SLOW-NEXT: # xmm1 = mem[2,2,3,3]
6600 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
6601 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
6602 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6603 ; AVX2-SLOW-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6604 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
6605 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[4],ymm10[4],ymm11[5],ymm10[5]
6606 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
6607 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
6608 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
6609 ; AVX2-SLOW-NEXT: vmovdqa 160(%r8), %ymm14
6610 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5],ymm0[6,7]
6611 ; AVX2-SLOW-NEXT: vpbroadcastd 176(%r9), %ymm1
6612 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
6613 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[6],ymm10[6],ymm11[7],ymm10[7]
6614 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
6615 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6616 ; AVX2-SLOW-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
6617 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm10 = ymm14[2,1,3,3,6,5,7,7]
6618 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
6619 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,3,4,5],ymm10[6,7]
6620 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm10 = mem[0,2,2,3,4,6,6,7]
6621 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
6622 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm0[0],ymm10[1],ymm0[2,3,4,5,6],ymm10[7]
6623 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6624 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
6625 ; AVX2-SLOW-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
6626 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
6627 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
6628 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
6629 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
6630 ; AVX2-SLOW-NEXT: # xmm11 = mem[2,2,3,3]
6631 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
6632 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1],ymm0[2,3,4,5],ymm11[6,7]
6633 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
6634 ; AVX2-SLOW-NEXT: # xmm11 = mem[2,2,3,3]
6635 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
6636 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0],ymm11[1],ymm0[2,3,4,5,6],ymm11[7]
6637 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6638 ; AVX2-SLOW-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6639 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
6640 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} ymm14 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[4],ymm12[4],ymm13[5],ymm12[5]
6641 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
6642 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
6643 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3],ymm0[4,5,6,7]
6644 ; AVX2-SLOW-NEXT: vmovdqa 192(%r8), %ymm14
6645 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5],ymm0[6,7]
6646 ; AVX2-SLOW-NEXT: vpbroadcastd 208(%r9), %ymm15
6647 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm15[5],ymm0[6,7]
6648 ; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm12 = ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[6],ymm12[6],ymm13[7],ymm12[7]
6649 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[2,3,2,3,6,7,6,7]
6650 ; AVX2-SLOW-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
6651 ; AVX2-SLOW-NEXT: # ymm12 = mem[2,3],ymm12[2,3]
6652 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm13 = ymm14[2,1,3,3,6,5,7,7]
6653 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
6654 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3,4,5],ymm13[6,7]
6655 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm13 = mem[0,2,2,3,4,6,6,7]
6656 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
6657 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4,5,6],ymm13[7]
6658 ; AVX2-SLOW-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6659 ; AVX2-SLOW-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm13 # 16-byte Folded Reload
6660 ; AVX2-SLOW-NEXT: # xmm13 = xmm4[2],mem[2],xmm4[3],mem[3]
6661 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm13 = xmm13[2,3,2,3]
6662 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
6663 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm13, %ymm4, %ymm13
6664 ; AVX2-SLOW-NEXT: vpermilps $250, (%rsp), %xmm14 # 16-byte Folded Reload
6665 ; AVX2-SLOW-NEXT: # xmm14 = mem[2,2,3,3]
6666 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,2,1]
6667 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3,4,5],ymm14[6,7]
6668 ; AVX2-SLOW-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
6669 ; AVX2-SLOW-NEXT: # xmm14 = mem[2,2,3,3]
6670 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,2,1]
6671 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4,5,6],ymm14[7]
6672 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6673 ; AVX2-SLOW-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
6674 ; AVX2-SLOW-NEXT: # ymm14 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
6675 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6676 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6677 ; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} ymm15 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
6678 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,2]
6679 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[2,1,2,3]
6680 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
6681 ; AVX2-SLOW-NEXT: vmovaps 224(%r8), %ymm15
6682 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
6683 ; AVX2-SLOW-NEXT: vbroadcastss 240(%r9), %ymm4
6684 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm14[0,1,2,3,4],ymm4[5],ymm14[6,7]
6685 ; AVX2-SLOW-NEXT: vunpckhps {{.*#+}} ymm14 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
6686 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,2,3,6,7,6,7]
6687 ; AVX2-SLOW-NEXT: vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
6688 ; AVX2-SLOW-NEXT: # ymm14 = mem[2,3],ymm14[2,3]
6689 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,1,3,3,6,5,7,7]
6690 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,1,2,3]
6691 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5],ymm15[6,7]
6692 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm15 = mem[0,2,2,3,4,6,6,7]
6693 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,1,2,3]
6694 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6],ymm15[7]
6695 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
6696 ; AVX2-SLOW-NEXT: vmovaps %ymm14, 1504(%rax)
6697 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 1440(%rax)
6698 ; AVX2-SLOW-NEXT: vmovaps %ymm13, 1408(%rax)
6699 ; AVX2-SLOW-NEXT: vmovdqa %ymm12, 1312(%rax)
6700 ; AVX2-SLOW-NEXT: vmovdqa %ymm2, 1248(%rax)
6701 ; AVX2-SLOW-NEXT: vmovaps %ymm11, 1216(%rax)
6702 ; AVX2-SLOW-NEXT: vmovdqa %ymm10, 1120(%rax)
6703 ; AVX2-SLOW-NEXT: vmovdqa %ymm3, 1056(%rax)
6704 ; AVX2-SLOW-NEXT: vmovaps %ymm9, 1024(%rax)
6705 ; AVX2-SLOW-NEXT: vmovdqa %ymm8, 928(%rax)
6706 ; AVX2-SLOW-NEXT: vmovdqa %ymm5, 864(%rax)
6707 ; AVX2-SLOW-NEXT: vmovaps %ymm7, 832(%rax)
6708 ; AVX2-SLOW-NEXT: vmovdqa %ymm6, 736(%rax)
6709 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6710 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 672(%rax)
6711 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6712 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 640(%rax)
6713 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6714 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 544(%rax)
6715 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6716 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 480(%rax)
6717 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6718 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 448(%rax)
6719 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6720 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 352(%rax)
6721 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6722 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 288(%rax)
6723 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6724 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rax)
6725 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6726 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rax)
6727 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6728 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 96(%rax)
6729 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6730 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rax)
6731 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6732 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1472(%rax)
6733 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6734 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1344(%rax)
6735 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6736 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1280(%rax)
6737 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6738 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1152(%rax)
6739 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6740 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1088(%rax)
6741 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6742 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 960(%rax)
6743 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6744 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 896(%rax)
6745 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6746 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 768(%rax)
6747 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6748 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 704(%rax)
6749 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6750 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 576(%rax)
6751 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6752 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 512(%rax)
6753 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6754 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 384(%rax)
6755 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6756 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 320(%rax)
6757 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6758 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 192(%rax)
6759 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6760 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rax)
6761 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6762 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rax)
6763 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6764 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1376(%rax)
6765 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6766 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 1184(%rax)
6767 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6768 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 992(%rax)
6769 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6770 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 800(%rax)
6771 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6772 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 608(%rax)
6773 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6774 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 416(%rax)
6775 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6776 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax)
6777 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6778 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax)
6779 ; AVX2-SLOW-NEXT: addq $2504, %rsp # imm = 0x9C8
6780 ; AVX2-SLOW-NEXT: vzeroupper
6781 ; AVX2-SLOW-NEXT: retq
6782 ;
6783 ; AVX2-FAST-LABEL: store_i32_stride6_vf64:
6784 ; AVX2-FAST: # %bb.0:
6785 ; AVX2-FAST-NEXT: subq $2376, %rsp # imm = 0x948
6786 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm9
6787 ; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %xmm1
6788 ; AVX2-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6789 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm7
6790 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %xmm10
6791 ; AVX2-FAST-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6792 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
6793 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6794 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6795 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %xmm2
6796 ; AVX2-FAST-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6797 ; AVX2-FAST-NEXT: vmovdqa 32(%rcx), %xmm6
6798 ; AVX2-FAST-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6799 ; AVX2-FAST-NEXT: vmovdqa 64(%rcx), %xmm5
6800 ; AVX2-FAST-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6801 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,2,2,3]
6802 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm2
6803 ; AVX2-FAST-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6804 ; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %xmm8
6805 ; AVX2-FAST-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6806 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,2,2,3]
6807 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
6808 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
6809 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5],ymm3[6,7]
6810 ; AVX2-FAST-NEXT: vmovdqa (%r8), %xmm2
6811 ; AVX2-FAST-NEXT: vmovdqa 32(%r8), %xmm11
6812 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
6813 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
6814 ; AVX2-FAST-NEXT: vpbroadcastd 4(%r9), %ymm4
6815 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
6816 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6817 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm10[2],xmm1[2],xmm10[3],xmm1[3]
6818 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6819 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[1,2,2,3]
6820 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm8[1,2,2,3]
6821 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
6822 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm4
6823 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
6824 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
6825 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm11[0],zero,xmm11[1],zero
6826 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
6827 ; AVX2-FAST-NEXT: vpbroadcastd 36(%r9), %ymm4
6828 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
6829 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6830 ; AVX2-FAST-NEXT: vmovdqa 64(%rdx), %xmm0
6831 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6832 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[1,2,2,3]
6833 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,2,2,3]
6834 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
6835 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
6836 ; AVX2-FAST-NEXT: vmovdqa 64(%rsi), %xmm15
6837 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %xmm13
6838 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm13[2],xmm15[2],xmm13[3],xmm15[3]
6839 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6840 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm4
6841 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
6842 ; AVX2-FAST-NEXT: vmovdqa 64(%r8), %xmm4
6843 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
6844 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3],ymm3[4,5,6,7]
6845 ; AVX2-FAST-NEXT: vpbroadcastd 68(%r9), %ymm5
6846 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5,6,7]
6847 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6848 ; AVX2-FAST-NEXT: vmovdqa 96(%rcx), %xmm0
6849 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6850 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,2,2,3]
6851 ; AVX2-FAST-NEXT: vmovdqa 96(%rdx), %xmm0
6852 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6853 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,2,2,3]
6854 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
6855 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
6856 ; AVX2-FAST-NEXT: vmovdqa 96(%rsi), %xmm10
6857 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %xmm12
6858 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
6859 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6860 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm5
6861 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5],ymm3[6,7]
6862 ; AVX2-FAST-NEXT: vmovdqa 96(%r8), %xmm8
6863 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm8[0],zero,xmm8[1],zero
6864 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3],ymm3[4,5,6,7]
6865 ; AVX2-FAST-NEXT: vpbroadcastd 100(%r9), %ymm5
6866 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5,6,7]
6867 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6868 ; AVX2-FAST-NEXT: vmovdqa 128(%rcx), %xmm0
6869 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6870 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,2,2,3]
6871 ; AVX2-FAST-NEXT: vmovdqa 128(%rdx), %xmm0
6872 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6873 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,2,2,3]
6874 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
6875 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
6876 ; AVX2-FAST-NEXT: vmovdqa 128(%rsi), %xmm0
6877 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6878 ; AVX2-FAST-NEXT: vmovdqa 128(%rdi), %xmm1
6879 ; AVX2-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6880 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
6881 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6882 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm5
6883 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5],ymm3[6,7]
6884 ; AVX2-FAST-NEXT: vmovdqa 128(%r8), %xmm5
6885 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm5[0],zero,xmm5[1],zero
6886 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm6[2,3],ymm3[4,5,6,7]
6887 ; AVX2-FAST-NEXT: vpbroadcastd 132(%r9), %ymm6
6888 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm6[3],ymm3[4,5,6,7]
6889 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6890 ; AVX2-FAST-NEXT: vmovdqa 160(%rcx), %xmm0
6891 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6892 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,2,2,3]
6893 ; AVX2-FAST-NEXT: vmovdqa 160(%rdx), %xmm0
6894 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6895 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,2,2,3]
6896 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
6897 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
6898 ; AVX2-FAST-NEXT: vmovdqa 160(%rsi), %xmm0
6899 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6900 ; AVX2-FAST-NEXT: vmovdqa 160(%rdi), %xmm1
6901 ; AVX2-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6902 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
6903 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6904 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm6
6905 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5],ymm3[6,7]
6906 ; AVX2-FAST-NEXT: vmovdqa 160(%r8), %xmm6
6907 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm14 = xmm6[0],zero,xmm6[1],zero
6908 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm14[2,3],ymm3[4,5,6,7]
6909 ; AVX2-FAST-NEXT: vpbroadcastd 164(%r9), %ymm14
6910 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm14[3],ymm3[4,5,6,7]
6911 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6912 ; AVX2-FAST-NEXT: vmovdqa 192(%rcx), %xmm0
6913 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6914 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,2,2,3]
6915 ; AVX2-FAST-NEXT: vmovdqa 192(%rdx), %xmm0
6916 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6917 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[1,2,2,3]
6918 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm14[0],xmm3[0],xmm14[1],xmm3[1]
6919 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
6920 ; AVX2-FAST-NEXT: vmovdqa 192(%rsi), %xmm0
6921 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6922 ; AVX2-FAST-NEXT: vmovdqa 192(%rdi), %xmm1
6923 ; AVX2-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6924 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
6925 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6926 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm14
6927 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm14[4,5],ymm3[6,7]
6928 ; AVX2-FAST-NEXT: vmovdqa 192(%r8), %xmm0
6929 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6930 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm14 = xmm0[0],zero,xmm0[1],zero
6931 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm14[2,3],ymm3[4,5,6,7]
6932 ; AVX2-FAST-NEXT: vpbroadcastd 196(%r9), %ymm14
6933 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm14[3],ymm3[4,5,6,7]
6934 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6935 ; AVX2-FAST-NEXT: vmovdqa 224(%rcx), %xmm3
6936 ; AVX2-FAST-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6937 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,2,2,3]
6938 ; AVX2-FAST-NEXT: vmovdqa 224(%rdx), %xmm0
6939 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6940 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[1,2,2,3]
6941 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm14[0],xmm3[0],xmm14[1],xmm3[1]
6942 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
6943 ; AVX2-FAST-NEXT: vmovdqa 224(%rsi), %xmm0
6944 ; AVX2-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6945 ; AVX2-FAST-NEXT: vmovdqa 224(%rdi), %xmm1
6946 ; AVX2-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6947 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
6948 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6949 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm14
6950 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm14[4,5],ymm3[6,7]
6951 ; AVX2-FAST-NEXT: vmovdqa 224(%r8), %xmm0
6952 ; AVX2-FAST-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
6953 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm14 = xmm0[0],zero,xmm0[1],zero
6954 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm14[2,3],ymm3[4,5,6,7]
6955 ; AVX2-FAST-NEXT: vpbroadcastd 228(%r9), %ymm14
6956 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm14[3],ymm3[4,5,6,7]
6957 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6958 ; AVX2-FAST-NEXT: vpbroadcastd (%rcx), %xmm3
6959 ; AVX2-FAST-NEXT: vpbroadcastd (%rdx), %xmm14
6960 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm14[0],xmm3[0],xmm14[1],xmm3[1]
6961 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
6962 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
6963 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
6964 ; AVX2-FAST-NEXT: vpbroadcastq %xmm2, %ymm0
6965 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
6966 ; AVX2-FAST-NEXT: vpbroadcastd (%r9), %ymm1
6967 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
6968 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6969 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm14
6970 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %ymm0
6971 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6972 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
6973 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm1 = ymm14[1,1,2,3,5,5,6,7]
6974 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
6975 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
6976 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm1
6977 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6978 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %ymm2
6979 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6980 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
6981 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6982 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
6983 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
6984 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
6985 ; AVX2-FAST-NEXT: vpbroadcastd 20(%r9), %ymm1
6986 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
6987 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6988 ; AVX2-FAST-NEXT: vpbroadcastd 32(%rcx), %xmm0
6989 ; AVX2-FAST-NEXT: vpbroadcastd 32(%rdx), %xmm1
6990 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6991 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6992 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
6993 ; AVX2-FAST-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
6994 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
6995 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
6996 ; AVX2-FAST-NEXT: vpbroadcastq %xmm11, %ymm1
6997 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
6998 ; AVX2-FAST-NEXT: vpbroadcastd 32(%r9), %ymm1
6999 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
7000 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7001 ; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %ymm1
7002 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7003 ; AVX2-FAST-NEXT: vmovdqa 32(%rcx), %ymm0
7004 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7005 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
7006 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,2,3,5,5,6,7]
7007 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
7008 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7009 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1
7010 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7011 ; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %ymm2
7012 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7013 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
7014 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7015 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7016 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
7017 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7018 ; AVX2-FAST-NEXT: vpbroadcastd 52(%r9), %ymm1
7019 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
7020 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7021 ; AVX2-FAST-NEXT: vpbroadcastd 64(%rcx), %xmm0
7022 ; AVX2-FAST-NEXT: vpbroadcastd 64(%rdx), %xmm1
7023 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
7024 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
7025 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
7026 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
7027 ; AVX2-FAST-NEXT: vpbroadcastq %xmm4, %ymm1
7028 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7029 ; AVX2-FAST-NEXT: vpbroadcastd 64(%r9), %ymm1
7030 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
7031 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7032 ; AVX2-FAST-NEXT: vmovdqa 64(%rdx), %ymm1
7033 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7034 ; AVX2-FAST-NEXT: vmovdqa 64(%rcx), %ymm0
7035 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7036 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
7037 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,2,3,5,5,6,7]
7038 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
7039 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7040 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm1
7041 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7042 ; AVX2-FAST-NEXT: vmovdqa 64(%rsi), %ymm2
7043 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7044 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
7045 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7046 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7047 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
7048 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
7049 ; AVX2-FAST-NEXT: vpbroadcastd 84(%r9), %ymm1
7050 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
7051 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7052 ; AVX2-FAST-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7053 ; AVX2-FAST-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7054 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
7055 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
7056 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
7057 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
7058 ; AVX2-FAST-NEXT: vpbroadcastq %xmm8, %ymm1
7059 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7060 ; AVX2-FAST-NEXT: vpbroadcastd 96(%r9), %ymm1
7061 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
7062 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7063 ; AVX2-FAST-NEXT: vmovdqa 96(%rdx), %ymm1
7064 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7065 ; AVX2-FAST-NEXT: vmovdqa 96(%rcx), %ymm2
7066 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm2[0,1,2,2,4,5,6,6]
7067 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm4 = ymm1[1,1,2,3,5,5,6,7]
7068 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4],ymm0[5],ymm4[6],ymm0[7]
7069 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7070 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm1
7071 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7072 ; AVX2-FAST-NEXT: vmovdqa 96(%rsi), %ymm3
7073 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7074 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
7075 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7076 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7077 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
7078 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
7079 ; AVX2-FAST-NEXT: vpbroadcastd 116(%r9), %ymm4
7080 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3],ymm0[4,5,6,7]
7081 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7082 ; AVX2-FAST-NEXT: vpbroadcastd 128(%rcx), %xmm0
7083 ; AVX2-FAST-NEXT: vpbroadcastd 128(%rdx), %xmm4
7084 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
7085 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7086 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm4 # 16-byte Folded Reload
7087 ; AVX2-FAST-NEXT: # xmm4 = xmm1[0],mem[0],xmm1[1],mem[1]
7088 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
7089 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
7090 ; AVX2-FAST-NEXT: vpbroadcastq %xmm5, %ymm4
7091 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
7092 ; AVX2-FAST-NEXT: vpbroadcastd 128(%r9), %ymm4
7093 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7]
7094 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7095 ; AVX2-FAST-NEXT: vmovdqa 128(%rdx), %ymm12
7096 ; AVX2-FAST-NEXT: vmovdqa 128(%rcx), %ymm9
7097 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm9[0,1,2,2,4,5,6,6]
7098 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm4 = ymm12[1,1,2,3,5,5,6,7]
7099 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4],ymm0[5],ymm4[6],ymm0[7]
7100 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7101 ; AVX2-FAST-NEXT: vmovdqa 128(%rdi), %ymm1
7102 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7103 ; AVX2-FAST-NEXT: vmovdqa 128(%rsi), %ymm3
7104 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7105 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
7106 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7107 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7108 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
7109 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
7110 ; AVX2-FAST-NEXT: vpbroadcastd 148(%r9), %ymm4
7111 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3],ymm0[4,5,6,7]
7112 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7113 ; AVX2-FAST-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7114 ; AVX2-FAST-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
7115 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
7116 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7117 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm4 # 16-byte Folded Reload
7118 ; AVX2-FAST-NEXT: # xmm4 = xmm1[0],mem[0],xmm1[1],mem[1]
7119 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
7120 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
7121 ; AVX2-FAST-NEXT: vpbroadcastq %xmm6, %ymm4
7122 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
7123 ; AVX2-FAST-NEXT: vpbroadcastd 160(%r9), %ymm4
7124 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7]
7125 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7126 ; AVX2-FAST-NEXT: vmovdqa 160(%rdx), %ymm10
7127 ; AVX2-FAST-NEXT: vmovdqa 160(%rcx), %ymm7
7128 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm7[0,1,2,2,4,5,6,6]
7129 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm4 = ymm10[1,1,2,3,5,5,6,7]
7130 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4],ymm0[5],ymm4[6],ymm0[7]
7131 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7132 ; AVX2-FAST-NEXT: vmovdqa 160(%rdi), %ymm1
7133 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7134 ; AVX2-FAST-NEXT: vmovdqa 160(%rsi), %ymm3
7135 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7136 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
7137 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7138 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7139 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
7140 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
7141 ; AVX2-FAST-NEXT: vpbroadcastd 180(%r9), %ymm4
7142 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3],ymm0[4,5,6,7]
7143 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7144 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
7145 ; AVX2-FAST-NEXT: vpbroadcastd %xmm11, %xmm0
7146 ; AVX2-FAST-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
7147 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
7148 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7149 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm4 # 16-byte Folded Reload
7150 ; AVX2-FAST-NEXT: # xmm4 = xmm1[0],mem[0],xmm1[1],mem[1]
7151 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
7152 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
7153 ; AVX2-FAST-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 16-byte Folded Reload
7154 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
7155 ; AVX2-FAST-NEXT: vpbroadcastd 192(%r9), %ymm4
7156 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7]
7157 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7158 ; AVX2-FAST-NEXT: vmovdqa 192(%rdx), %ymm1
7159 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7160 ; AVX2-FAST-NEXT: vmovdqa 192(%rcx), %ymm0
7161 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7162 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
7163 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm4 = ymm1[1,1,2,3,5,5,6,7]
7164 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4],ymm0[5],ymm4[6],ymm0[7]
7165 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7166 ; AVX2-FAST-NEXT: vmovdqa 192(%rdi), %ymm1
7167 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7168 ; AVX2-FAST-NEXT: vmovdqa 192(%rsi), %ymm3
7169 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7170 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
7171 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7172 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7173 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
7174 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
7175 ; AVX2-FAST-NEXT: vpbroadcastd 212(%r9), %ymm4
7176 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3],ymm0[4,5,6,7]
7177 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7178 ; AVX2-FAST-NEXT: vbroadcastss 224(%rcx), %xmm0
7179 ; AVX2-FAST-NEXT: vbroadcastss 224(%rdx), %xmm4
7180 ; AVX2-FAST-NEXT: vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
7181 ; AVX2-FAST-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7182 ; AVX2-FAST-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm4 # 16-byte Folded Reload
7183 ; AVX2-FAST-NEXT: # xmm4 = xmm1[0],mem[0],xmm1[1],mem[1]
7184 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,2,1]
7185 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
7186 ; AVX2-FAST-NEXT: vbroadcastsd (%rsp), %ymm4 # 16-byte Folded Reload
7187 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
7188 ; AVX2-FAST-NEXT: vbroadcastss 224(%r9), %ymm4
7189 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7]
7190 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7191 ; AVX2-FAST-NEXT: vmovdqa 224(%rdx), %ymm1
7192 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7193 ; AVX2-FAST-NEXT: vmovdqa 224(%rcx), %ymm8
7194 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm8[0,1,2,2,4,5,6,6]
7195 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm4 = ymm1[1,1,2,3,5,5,6,7]
7196 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4],ymm0[5],ymm4[6],ymm0[7]
7197 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7198 ; AVX2-FAST-NEXT: vmovdqa 224(%rdi), %ymm1
7199 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7200 ; AVX2-FAST-NEXT: vmovdqa 224(%rsi), %ymm3
7201 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7202 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
7203 ; AVX2-FAST-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
7204 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7205 ; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
7206 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
7207 ; AVX2-FAST-NEXT: vpbroadcastd 244(%r9), %ymm4
7208 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3],ymm0[4,5,6,7]
7209 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7210 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7211 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7212 ; AVX2-FAST-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
7213 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
7214 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7215 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm4
7216 ; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm0
7217 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [2,2,3,3,2,2,3,3]
7218 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1,0,1]
7219 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm15
7220 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm15[0,1],ymm4[2,3,4,5],ymm15[6,7]
7221 ; AVX2-FAST-NEXT: vmovdqa (%r9), %ymm15
7222 ; AVX2-FAST-NEXT: vpermd %ymm15, %ymm1, %ymm3
7223 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3,4,5,6],ymm3[7]
7224 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7225 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7226 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
7227 ; AVX2-FAST-NEXT: # ymm3 = ymm3[0],mem[0],ymm3[1],mem[1],ymm3[4],mem[4],ymm3[5],mem[5]
7228 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
7229 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm4 = ymm14[0],ymm5[0],ymm14[1],ymm5[1],ymm14[4],ymm5[4],ymm14[5],ymm5[5]
7230 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
7231 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
7232 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
7233 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5],ymm3[6,7]
7234 ; AVX2-FAST-NEXT: vpbroadcastd 16(%r9), %ymm4
7235 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5],ymm3[6,7]
7236 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7237 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm14[2],ymm5[2],ymm14[3],ymm5[3],ymm14[6],ymm5[6],ymm14[7],ymm5[7]
7238 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
7239 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
7240 ; AVX2-FAST-NEXT: # ymm3 = mem[2,3],ymm3[2,3]
7241 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [6,5,3,3,6,5,7,7]
7242 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm6, %ymm0
7243 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5],ymm0[6,7]
7244 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = [4,6,2,3,4,6,6,7]
7245 ; AVX2-FAST-NEXT: vpermd %ymm15, %ymm5, %ymm15
7246 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3,4,5,6],ymm15[7]
7247 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7248 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7249 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7250 ; AVX2-FAST-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
7251 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
7252 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7253 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
7254 ; AVX2-FAST-NEXT: vmovdqa 32(%r8), %ymm15
7255 ; AVX2-FAST-NEXT: vpermd %ymm15, %ymm1, %ymm13
7256 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3,4,5],ymm13[6,7]
7257 ; AVX2-FAST-NEXT: vmovdqa 32(%r9), %ymm13
7258 ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm1, %ymm14
7259 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm14[1],ymm0[2,3,4,5,6],ymm14[7]
7260 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7261 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7262 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7263 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
7264 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7265 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7266 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm14 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[4],ymm3[4],ymm4[5],ymm3[5]
7267 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
7268 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7269 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3],ymm0[4,5,6,7]
7270 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5],ymm0[6,7]
7271 ; AVX2-FAST-NEXT: vpbroadcastd 48(%r9), %ymm14
7272 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5],ymm0[6,7]
7273 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7274 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[6],ymm3[6],ymm4[7],ymm3[7]
7275 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
7276 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7277 ; AVX2-FAST-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
7278 ; AVX2-FAST-NEXT: vpermd %ymm15, %ymm6, %ymm14
7279 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm0[2,3,4,5],ymm14[6,7]
7280 ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm5, %ymm13
7281 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm13[1],ymm0[2,3,4,5,6],ymm13[7]
7282 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7283 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7284 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7285 ; AVX2-FAST-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
7286 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
7287 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7288 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
7289 ; AVX2-FAST-NEXT: vmovdqa 64(%r8), %ymm13
7290 ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm1, %ymm14
7291 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm0[2,3,4,5],ymm14[6,7]
7292 ; AVX2-FAST-NEXT: vmovdqa 64(%r9), %ymm14
7293 ; AVX2-FAST-NEXT: vpermd %ymm14, %ymm1, %ymm15
7294 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3,4,5,6],ymm15[7]
7295 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7296 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7297 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7298 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
7299 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7300 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7301 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm15 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[4],ymm3[4],ymm4[5],ymm3[5]
7302 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,2,2]
7303 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7304 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
7305 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5],ymm0[6,7]
7306 ; AVX2-FAST-NEXT: vpbroadcastd 80(%r9), %ymm15
7307 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm15[5],ymm0[6,7]
7308 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7309 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[6],ymm3[6],ymm4[7],ymm3[7]
7310 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
7311 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7312 ; AVX2-FAST-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
7313 ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm6, %ymm13
7314 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3,4,5],ymm13[6,7]
7315 ; AVX2-FAST-NEXT: vpermd %ymm14, %ymm5, %ymm13
7316 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm13[1],ymm0[2,3,4,5,6],ymm13[7]
7317 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7318 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7319 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7320 ; AVX2-FAST-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
7321 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
7322 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7323 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
7324 ; AVX2-FAST-NEXT: vmovdqa 96(%r8), %ymm13
7325 ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm1, %ymm14
7326 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm0[2,3,4,5],ymm14[6,7]
7327 ; AVX2-FAST-NEXT: vmovdqa 96(%r9), %ymm14
7328 ; AVX2-FAST-NEXT: vpermd %ymm14, %ymm1, %ymm15
7329 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3,4,5,6],ymm15[7]
7330 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7331 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7332 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7333 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
7334 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7335 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm15 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
7336 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,2,2]
7337 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7338 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
7339 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5],ymm0[6,7]
7340 ; AVX2-FAST-NEXT: vpbroadcastd 112(%r9), %ymm15
7341 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm15[5],ymm0[6,7]
7342 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7343 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
7344 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
7345 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7346 ; AVX2-FAST-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
7347 ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm6, %ymm2
7348 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5],ymm2[6,7]
7349 ; AVX2-FAST-NEXT: vpermd %ymm14, %ymm5, %ymm2
7350 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6],ymm2[7]
7351 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7352 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7353 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7354 ; AVX2-FAST-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
7355 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
7356 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7357 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
7358 ; AVX2-FAST-NEXT: vmovdqa 128(%r8), %ymm2
7359 ; AVX2-FAST-NEXT: vpermd %ymm2, %ymm1, %ymm13
7360 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3,4,5],ymm13[6,7]
7361 ; AVX2-FAST-NEXT: vmovdqa 128(%r9), %ymm3
7362 ; AVX2-FAST-NEXT: vpermd %ymm3, %ymm1, %ymm13
7363 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm13[1],ymm0[2,3,4,5,6],ymm13[7]
7364 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7365 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7366 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7367 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
7368 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm14 = ymm12[0],ymm9[0],ymm12[1],ymm9[1],ymm12[4],ymm9[4],ymm12[5],ymm9[5]
7369 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
7370 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7371 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3],ymm0[4,5,6,7]
7372 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
7373 ; AVX2-FAST-NEXT: vpbroadcastd 144(%r9), %ymm14
7374 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5],ymm0[6,7]
7375 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7376 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm12[2],ymm9[2],ymm12[3],ymm9[3],ymm12[6],ymm9[6],ymm12[7],ymm9[7]
7377 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
7378 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7379 ; AVX2-FAST-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
7380 ; AVX2-FAST-NEXT: vpermd %ymm2, %ymm6, %ymm2
7381 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5],ymm2[6,7]
7382 ; AVX2-FAST-NEXT: vpermd %ymm3, %ymm5, %ymm2
7383 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm14 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6],ymm2[7]
7384 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7385 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7386 ; AVX2-FAST-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
7387 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
7388 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7389 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
7390 ; AVX2-FAST-NEXT: vmovdqa 160(%r8), %ymm3
7391 ; AVX2-FAST-NEXT: vpermd %ymm3, %ymm1, %ymm2
7392 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5],ymm2[6,7]
7393 ; AVX2-FAST-NEXT: vmovdqa 160(%r9), %ymm2
7394 ; AVX2-FAST-NEXT: vpermd %ymm2, %ymm1, %ymm4
7395 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm0[0],ymm4[1],ymm0[2,3,4,5,6],ymm4[7]
7396 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7397 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7398 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
7399 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm4 = ymm10[0],ymm7[0],ymm10[1],ymm7[1],ymm10[4],ymm7[4],ymm10[5],ymm7[5]
7400 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
7401 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7402 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
7403 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
7404 ; AVX2-FAST-NEXT: vpbroadcastd 176(%r9), %ymm4
7405 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7]
7406 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm10[2],ymm7[2],ymm10[3],ymm7[3],ymm10[6],ymm7[6],ymm10[7],ymm7[7]
7407 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
7408 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7409 ; AVX2-FAST-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
7410 ; AVX2-FAST-NEXT: vpermd %ymm3, %ymm6, %ymm3
7411 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3,4,5],ymm3[6,7]
7412 ; AVX2-FAST-NEXT: vpermd %ymm2, %ymm5, %ymm2
7413 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6],ymm2[7]
7414 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7415 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm11[2],xmm0[3],xmm11[3]
7416 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
7417 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7418 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
7419 ; AVX2-FAST-NEXT: vmovdqa 192(%r8), %ymm2
7420 ; AVX2-FAST-NEXT: vpermd %ymm2, %ymm1, %ymm3
7421 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3,4,5],ymm3[6,7]
7422 ; AVX2-FAST-NEXT: vmovdqa 192(%r9), %ymm3
7423 ; AVX2-FAST-NEXT: vpermd %ymm3, %ymm1, %ymm10
7424 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm10 = ymm0[0],ymm10[1],ymm0[2,3,4,5,6],ymm10[7]
7425 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7426 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7427 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
7428 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
7429 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
7430 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm15 = ymm13[0],ymm11[0],ymm13[1],ymm11[1],ymm13[4],ymm11[4],ymm13[5],ymm11[5]
7431 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,2,2]
7432 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
7433 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
7434 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
7435 ; AVX2-FAST-NEXT: vpbroadcastd 208(%r9), %ymm15
7436 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm0[0,1,2,3,4],ymm15[5],ymm0[6,7]
7437 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm15 = ymm13[2],ymm11[2],ymm13[3],ymm11[3],ymm13[6],ymm11[6],ymm13[7],ymm11[7]
7438 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[2,3,2,3,6,7,6,7]
7439 ; AVX2-FAST-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
7440 ; AVX2-FAST-NEXT: # ymm15 = mem[2,3],ymm15[2,3]
7441 ; AVX2-FAST-NEXT: vpermd %ymm2, %ymm6, %ymm2
7442 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm15[2,3,4,5],ymm2[6,7]
7443 ; AVX2-FAST-NEXT: vpermd %ymm3, %ymm5, %ymm3
7444 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6],ymm3[7]
7445 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
7446 ; AVX2-FAST-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
7447 ; AVX2-FAST-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
7448 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
7449 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
7450 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm11, %ymm3
7451 ; AVX2-FAST-NEXT: vmovdqa 224(%r8), %ymm15
7452 ; AVX2-FAST-NEXT: vpermd %ymm15, %ymm1, %ymm13
7453 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0,1],ymm3[2,3,4,5],ymm13[6,7]
7454 ; AVX2-FAST-NEXT: vmovdqa 224(%r9), %ymm13
7455 ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm1, %ymm1
7456 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3,4,5,6],ymm1[7]
7457 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7458 ; AVX2-FAST-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
7459 ; AVX2-FAST-NEXT: # ymm3 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
7460 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7461 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} ymm11 = ymm0[0],ymm8[0],ymm0[1],ymm8[1],ymm0[4],ymm8[4],ymm0[5],ymm8[5]
7462 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
7463 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
7464 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm11[2,3],ymm3[4,5,6,7]
7465 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm15[4,5],ymm3[6,7]
7466 ; AVX2-FAST-NEXT: vpbroadcastd 240(%r9), %ymm11
7467 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm11[5],ymm3[6,7]
7468 ; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} ymm8 = ymm0[2],ymm8[2],ymm0[3],ymm8[3],ymm0[6],ymm8[6],ymm0[7],ymm8[7]
7469 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[2,3,2,3,6,7,6,7]
7470 ; AVX2-FAST-NEXT: vperm2i128 $19, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
7471 ; AVX2-FAST-NEXT: # ymm8 = mem[2,3],ymm8[2,3]
7472 ; AVX2-FAST-NEXT: vpermd %ymm15, %ymm6, %ymm6
7473 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3,4,5],ymm6[6,7]
7474 ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm5, %ymm5
7475 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4,5,6],ymm5[7]
7476 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
7477 ; AVX2-FAST-NEXT: vmovdqa %ymm5, 1504(%rax)
7478 ; AVX2-FAST-NEXT: vmovdqa %ymm3, 1440(%rax)
7479 ; AVX2-FAST-NEXT: vmovdqa %ymm1, 1408(%rax)
7480 ; AVX2-FAST-NEXT: vmovdqa %ymm2, 1312(%rax)
7481 ; AVX2-FAST-NEXT: vmovdqa %ymm4, 1248(%rax)
7482 ; AVX2-FAST-NEXT: vmovdqa %ymm10, 1216(%rax)
7483 ; AVX2-FAST-NEXT: vmovdqa %ymm7, 1120(%rax)
7484 ; AVX2-FAST-NEXT: vmovdqa %ymm9, 1056(%rax)
7485 ; AVX2-FAST-NEXT: vmovdqa %ymm12, 1024(%rax)
7486 ; AVX2-FAST-NEXT: vmovdqa %ymm14, 928(%rax)
7487 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7488 ; AVX2-FAST-NEXT: vmovaps %ymm0, 864(%rax)
7489 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7490 ; AVX2-FAST-NEXT: vmovaps %ymm0, 832(%rax)
7491 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7492 ; AVX2-FAST-NEXT: vmovaps %ymm0, 736(%rax)
7493 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7494 ; AVX2-FAST-NEXT: vmovaps %ymm0, 672(%rax)
7495 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7496 ; AVX2-FAST-NEXT: vmovaps %ymm0, 640(%rax)
7497 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7498 ; AVX2-FAST-NEXT: vmovaps %ymm0, 544(%rax)
7499 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7500 ; AVX2-FAST-NEXT: vmovaps %ymm0, 480(%rax)
7501 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7502 ; AVX2-FAST-NEXT: vmovaps %ymm0, 448(%rax)
7503 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7504 ; AVX2-FAST-NEXT: vmovaps %ymm0, 352(%rax)
7505 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7506 ; AVX2-FAST-NEXT: vmovaps %ymm0, 288(%rax)
7507 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7508 ; AVX2-FAST-NEXT: vmovaps %ymm0, 256(%rax)
7509 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7510 ; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rax)
7511 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7512 ; AVX2-FAST-NEXT: vmovaps %ymm0, 96(%rax)
7513 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7514 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rax)
7515 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7516 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1472(%rax)
7517 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7518 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1344(%rax)
7519 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7520 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1280(%rax)
7521 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7522 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1152(%rax)
7523 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7524 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1088(%rax)
7525 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7526 ; AVX2-FAST-NEXT: vmovaps %ymm0, 960(%rax)
7527 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7528 ; AVX2-FAST-NEXT: vmovaps %ymm0, 896(%rax)
7529 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7530 ; AVX2-FAST-NEXT: vmovaps %ymm0, 768(%rax)
7531 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7532 ; AVX2-FAST-NEXT: vmovaps %ymm0, 704(%rax)
7533 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7534 ; AVX2-FAST-NEXT: vmovaps %ymm0, 576(%rax)
7535 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7536 ; AVX2-FAST-NEXT: vmovaps %ymm0, 512(%rax)
7537 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7538 ; AVX2-FAST-NEXT: vmovaps %ymm0, 384(%rax)
7539 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7540 ; AVX2-FAST-NEXT: vmovaps %ymm0, 320(%rax)
7541 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7542 ; AVX2-FAST-NEXT: vmovaps %ymm0, 192(%rax)
7543 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7544 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rax)
7545 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7546 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rax)
7547 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7548 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1376(%rax)
7549 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7550 ; AVX2-FAST-NEXT: vmovaps %ymm0, 1184(%rax)
7551 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7552 ; AVX2-FAST-NEXT: vmovaps %ymm0, 992(%rax)
7553 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7554 ; AVX2-FAST-NEXT: vmovaps %ymm0, 800(%rax)
7555 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7556 ; AVX2-FAST-NEXT: vmovaps %ymm0, 608(%rax)
7557 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7558 ; AVX2-FAST-NEXT: vmovaps %ymm0, 416(%rax)
7559 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7560 ; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rax)
7561 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7562 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rax)
7563 ; AVX2-FAST-NEXT: addq $2376, %rsp # imm = 0x948
7564 ; AVX2-FAST-NEXT: vzeroupper
7565 ; AVX2-FAST-NEXT: retq
7567 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride6_vf64:
7568 ; AVX2-FAST-PERLANE: # %bb.0:
7569 ; AVX2-FAST-PERLANE-NEXT: subq $2504, %rsp # imm = 0x9C8
7570 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm0
7571 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7572 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %xmm1
7573 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7574 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm3
7575 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %xmm10
7576 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7577 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
7578 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7579 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm4
7580 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm0
7581 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7582 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %xmm8
7583 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7584 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rcx), %xmm7
7585 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7586 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,2,2,3]
7587 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm0
7588 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7589 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %xmm9
7590 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7591 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,2,2,3]
7592 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
7593 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
7594 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5],ymm5[6,7]
7595 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm2
7596 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %xmm6
7597 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7598 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero
7599 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7600 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
7601 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 4(%r9), %ymm5
7602 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
7603 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7604 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm10[2],xmm1[2],xmm10[3],xmm1[3]
7605 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7606 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm8[1,2,2,3]
7607 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm9[1,2,2,3]
7608 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
7609 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm5
7610 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
7611 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
7612 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero
7613 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
7614 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 36(%r9), %ymm5
7615 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
7616 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7617 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdx), %xmm0
7618 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7619 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[1,2,2,3]
7620 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,2,2,3]
7621 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
7622 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm4[0,1,2,1]
7623 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rsi), %xmm4
7624 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %xmm5
7625 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
7626 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7627 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm7
7628 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
7629 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%r8), %xmm0
7630 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7631 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero
7632 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
7633 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 68(%r9), %ymm7
7634 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm7[3],ymm6[4,5,6,7]
7635 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7636 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rcx), %xmm0
7637 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7638 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,2,2,3]
7639 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdx), %xmm0
7640 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7641 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,2,2,3]
7642 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
7643 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm6[0,1,2,1]
7644 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rsi), %xmm6
7645 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %xmm7
7646 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
7647 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7648 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm9
7649 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
7650 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%r8), %xmm0
7651 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7652 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
7653 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
7654 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 100(%r9), %ymm9
7655 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm9[3],ymm8[4,5,6,7]
7656 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7657 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rcx), %xmm0
7658 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7659 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[1,2,2,3]
7660 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rdx), %xmm0
7661 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7662 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[1,2,2,3]
7663 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
7664 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm8[0,1,2,1]
7665 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rsi), %xmm8
7666 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rdi), %xmm9
7667 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
7668 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7669 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm11
7670 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
7671 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%r8), %xmm0
7672 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7673 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm11 = xmm0[0],zero,xmm0[1],zero
7674 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
7675 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 132(%r9), %ymm11
7676 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6,7]
7677 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7678 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rcx), %xmm0
7679 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7680 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm10 = xmm0[1,2,2,3]
7681 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rdx), %xmm0
7682 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7683 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm11 = xmm0[1,2,2,3]
7684 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
7685 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm12 = ymm10[0,1,2,1]
7686 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rsi), %xmm10
7687 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rdi), %xmm11
7688 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm11[2],xmm10[2],xmm11[3],xmm10[3]
7689 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7690 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm13
7691 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7]
7692 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%r8), %xmm0
7693 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7694 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm13 = xmm0[0],zero,xmm0[1],zero
7695 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5,6,7]
7696 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 164(%r9), %ymm13
7697 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm13[3],ymm12[4,5,6,7]
7698 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7699 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rcx), %xmm0
7700 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7701 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm12 = xmm0[1,2,2,3]
7702 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rdx), %xmm0
7703 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7704 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm13 = xmm0[1,2,2,3]
7705 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
7706 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm12[0,1,2,1]
7707 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rsi), %xmm12
7708 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rdi), %xmm13
7709 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
7710 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7711 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm15
7712 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
7713 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%r8), %xmm0
7714 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7715 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = xmm0[0],zero,xmm0[1],zero
7716 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
7717 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 196(%r9), %ymm15
7718 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
7719 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7720 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rcx), %xmm0
7721 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7722 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[1,2,2,3]
7723 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rdx), %xmm0
7724 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7725 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm15 = xmm0[1,2,2,3]
7726 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
7727 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm14[0,1,2,1]
7728 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rsi), %xmm14
7729 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7730 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rdi), %xmm0
7731 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7732 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm14 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
7733 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7734 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
7735 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm14[4,5],ymm1[6,7]
7736 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%r8), %xmm14
7737 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm14, (%rsp) # 16-byte Spill
7738 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm14 = xmm14[0],zero,xmm14[1],zero
7739 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3],ymm0[4,5,6,7]
7740 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 228(%r9), %ymm14
7741 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3],ymm0[4,5,6,7]
7742 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7743 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd (%rcx), %xmm0
7744 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd (%rdx), %xmm14
7745 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
7746 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload
7747 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = xmm3[0],mem[0],xmm3[1],mem[1]
7748 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
7749 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
7750 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm2, %ymm1
7751 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
7752 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r9), %xmm1
7753 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7754 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm1, %ymm1
7755 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
7756 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7757 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm0
7758 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7759 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %ymm1
7760 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7761 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[0,1,2,2,4,5,6,6]
7762 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm0[1,1,2,3,5,5,6,7]
7763 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0],ymm2[1],ymm14[2],ymm2[3],ymm14[4],ymm2[5],ymm14[6],ymm2[7]
7764 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
7765 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0
7766 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7767 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm1
7768 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7769 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
7770 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7771 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
7772 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm14 = mem[0],zero,mem[1],zero
7773 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm14[2,3],ymm2[4,5,6,7]
7774 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 20(%r9), %ymm14
7775 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm14[3],ymm2[4,5,6,7]
7776 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7777 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 32(%rcx), %xmm2
7778 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 32(%rdx), %xmm14
7779 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm2 = xmm14[0],xmm2[0],xmm14[1],xmm2[1]
7780 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7781 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
7782 ; AVX2-FAST-PERLANE-NEXT: # xmm3 = xmm0[0],mem[0],xmm0[1],mem[1]
7783 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,2,1]
7784 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
7785 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 16-byte Folded Reload
7786 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
7787 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %xmm0
7788 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7789 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm0, %ymm3
7790 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
7791 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7792 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %ymm0
7793 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7794 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %ymm3
7795 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm3[0,1,2,2,4,5,6,6]
7796 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm15 = ymm0[1,1,2,3,5,5,6,7]
7797 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
7798 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
7799 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm0
7800 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7801 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %ymm1
7802 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7803 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
7804 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7805 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
7806 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
7807 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
7808 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 52(%r9), %ymm15
7809 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
7810 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7811 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 64(%rcx), %xmm14
7812 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 64(%rdx), %xmm15
7813 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
7814 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
7815 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
7816 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm14[2,3],ymm4[4,5,6,7]
7817 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 16-byte Folded Reload
7818 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
7819 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%r9), %xmm0
7820 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7821 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm0, %ymm5
7822 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm5[5],ymm4[6,7]
7823 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7824 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdx), %ymm5
7825 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rcx), %ymm4
7826 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm4[0,1,2,2,4,5,6,6]
7827 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm15 = ymm5[1,1,2,3,5,5,6,7]
7828 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
7829 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
7830 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %ymm0
7831 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7832 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rsi), %ymm1
7833 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7834 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
7835 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7836 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
7837 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
7838 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
7839 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 84(%r9), %ymm15
7840 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
7841 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7842 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
7843 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
7844 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
7845 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
7846 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
7847 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm14[2,3],ymm6[4,5,6,7]
7848 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 16-byte Folded Reload
7849 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
7850 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%r9), %xmm0
7851 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7852 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm0, %ymm7
7853 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
7854 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7855 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdx), %ymm7
7856 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rcx), %ymm6
7857 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm6[0,1,2,2,4,5,6,6]
7858 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm15 = ymm7[1,1,2,3,5,5,6,7]
7859 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
7860 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
7861 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %ymm0
7862 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7863 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rsi), %ymm1
7864 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7865 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
7866 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7867 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
7868 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
7869 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
7870 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 116(%r9), %ymm15
7871 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
7872 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7873 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 128(%rcx), %xmm14
7874 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 128(%rdx), %xmm15
7875 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
7876 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
7877 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1]
7878 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm14[2,3],ymm8[4,5,6,7]
7879 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 16-byte Folded Reload
7880 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
7881 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%r9), %xmm0
7882 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7883 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm0, %ymm9
7884 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
7885 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7886 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rdx), %ymm9
7887 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rcx), %ymm8
7888 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm8[0,1,2,2,4,5,6,6]
7889 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm15 = ymm9[1,1,2,3,5,5,6,7]
7890 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
7891 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
7892 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rdi), %ymm0
7893 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7894 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rsi), %ymm1
7895 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7896 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
7897 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7898 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
7899 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
7900 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
7901 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 148(%r9), %ymm15
7902 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
7903 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7904 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
7905 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
7906 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
7907 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
7908 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,1,2,1]
7909 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm14[2,3],ymm10[4,5,6,7]
7910 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 16-byte Folded Reload
7911 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
7912 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%r9), %xmm0
7913 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7914 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm0, %ymm11
7915 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm11[5],ymm10[6,7]
7916 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7917 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rdx), %ymm11
7918 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rcx), %ymm10
7919 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm10[0,1,2,2,4,5,6,6]
7920 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm15 = ymm11[1,1,2,3,5,5,6,7]
7921 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
7922 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
7923 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rdi), %ymm0
7924 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7925 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rsi), %ymm1
7926 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7927 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
7928 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7929 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
7930 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
7931 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
7932 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 180(%r9), %ymm15
7933 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
7934 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7935 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
7936 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
7937 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
7938 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
7939 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,2,1]
7940 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1],ymm14[2,3],ymm12[4,5,6,7]
7941 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 16-byte Folded Reload
7942 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7]
7943 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%r9), %xmm0
7944 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7945 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm0, %ymm13
7946 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3,4],ymm13[5],ymm12[6,7]
7947 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7948 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rdx), %ymm13
7949 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rcx), %ymm12
7950 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm12[0,1,2,2,4,5,6,6]
7951 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm15 = ymm13[1,1,2,3,5,5,6,7]
7952 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3],ymm15[4],ymm14[5],ymm15[6],ymm14[7]
7953 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
7954 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rdi), %ymm0
7955 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7956 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rsi), %ymm1
7957 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7958 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
7959 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7960 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm0[4,5],ymm14[6,7]
7961 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
7962 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
7963 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 212(%r9), %ymm15
7964 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
7965 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7966 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 224(%rcx), %xmm14
7967 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 224(%rdx), %xmm15
7968 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
7969 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7970 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm15 # 16-byte Folded Reload
7971 ; AVX2-FAST-PERLANE-NEXT: # xmm15 = xmm0[0],mem[0],xmm0[1],mem[1]
7972 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
7973 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
7974 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rsp), %ymm15 # 16-byte Folded Reload
7975 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
7976 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%r9), %xmm0
7977 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7978 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm0, %ymm15
7979 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3,4],ymm15[5],ymm14[6,7]
7980 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7981 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rdx), %ymm14
7982 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7983 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rcx), %ymm0
7984 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7985 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
7986 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm15 = ymm14[1,1,2,3,5,5,6,7]
7987 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0],ymm0[1],ymm15[2],ymm0[3],ymm15[4],ymm0[5],ymm15[6],ymm0[7]
7988 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,1,2,3]
7989 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rdi), %ymm14
7990 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7991 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rsi), %ymm0
7992 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7993 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm14 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
7994 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7995 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm14[4,5],ymm1[6,7]
7996 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
7997 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
7998 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 244(%r9), %ymm15
7999 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3],ymm0[4,5,6,7]
8000 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8001 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8002 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8003 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
8004 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8005 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
8006 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm14, %ymm0
8007 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
8008 ; AVX2-FAST-PERLANE-NEXT: # xmm15 = mem[2,2,3,3]
8009 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
8010 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3,4,5],ymm15[6,7]
8011 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
8012 ; AVX2-FAST-PERLANE-NEXT: # xmm15 = mem[2,2,3,3]
8013 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,2,1]
8014 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3,4,5,6],ymm15[7]
8015 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8016 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8017 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8018 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8019 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8020 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8021 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm15 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
8022 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,2]
8023 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,2,3]
8024 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
8025 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm15
8026 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5],ymm0[6,7]
8027 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%r9), %ymm14
8028 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5],ymm0[6,7]
8029 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8030 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
8031 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
8032 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8033 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
8034 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm15[2,1,3,3,6,5,7,7]
8035 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8036 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8037 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
8038 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
8039 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8040 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8041 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8042 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8043 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
8044 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8045 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8046 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
8047 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8048 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8049 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8050 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8051 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8052 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8053 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8054 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8055 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8056 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8057 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8058 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8059 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8060 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
8061 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
8062 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
8063 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
8064 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %ymm1
8065 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
8066 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 48(%r9), %ymm14
8067 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm14[5],ymm0[6,7]
8068 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8069 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
8070 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
8071 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8072 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
8073 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
8074 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
8075 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8076 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
8077 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
8078 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8079 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8080 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8081 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8082 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
8083 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8084 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8085 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
8086 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8087 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8088 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8089 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8090 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8091 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8092 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8093 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8094 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8095 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8096 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8097 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8098 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[4],ymm4[4],ymm5[5],ymm4[5]
8099 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
8100 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
8101 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
8102 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%r8), %ymm1
8103 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
8104 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 80(%r9), %ymm2
8105 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
8106 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8107 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[6],ymm4[6],ymm5[7],ymm4[7]
8108 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
8109 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8110 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
8111 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
8112 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
8113 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8114 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
8115 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
8116 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8117 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8118 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8119 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8120 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
8121 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8122 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8123 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
8124 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8125 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8126 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8127 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8128 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8129 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8130 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8131 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8132 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8133 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8134 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8135 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8136 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[4],ymm6[4],ymm7[5],ymm6[5]
8137 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
8138 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
8139 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
8140 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%r8), %ymm1
8141 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
8142 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 112(%r9), %ymm2
8143 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
8144 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8145 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[6],ymm6[6],ymm7[7],ymm6[7]
8146 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
8147 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8148 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
8149 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
8150 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
8151 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8152 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
8153 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
8154 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8155 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8156 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8157 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
8158 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8159 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8160 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
8161 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8162 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8163 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8164 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8165 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8166 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8167 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8168 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8169 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8170 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8171 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8172 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[4],ymm8[4],ymm9[5],ymm8[5]
8173 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
8174 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
8175 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
8176 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%r8), %ymm1
8177 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
8178 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 144(%r9), %ymm2
8179 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm5 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
8180 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[6],ymm8[6],ymm9[7],ymm8[7]
8181 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
8182 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8183 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
8184 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
8185 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
8186 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8187 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = mem[0,2,2,3,4,6,6,7]
8188 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
8189 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8190 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8191 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8192 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
8193 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8194 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8195 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
8196 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8197 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8198 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8199 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
8200 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
8201 ; AVX2-FAST-PERLANE-NEXT: # xmm1 = mem[2,2,3,3]
8202 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
8203 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
8204 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8205 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8206 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8207 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[4],ymm10[4],ymm11[5],ymm10[5]
8208 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
8209 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
8210 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
8211 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%r8), %ymm14
8212 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5],ymm0[6,7]
8213 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 176(%r9), %ymm1
8214 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
8215 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[6],ymm10[6],ymm11[7],ymm10[7]
8216 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
8217 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8218 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[2,3],ymm0[2,3]
8219 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm10 = ymm14[2,1,3,3,6,5,7,7]
8220 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
8221 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,3,4,5],ymm10[6,7]
8222 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm10 = mem[0,2,2,3,4,6,6,7]
8223 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
8224 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm0[0],ymm10[1],ymm0[2,3,4,5,6],ymm10[7]
8225 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8226 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
8227 ; AVX2-FAST-PERLANE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
8228 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
8229 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8230 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
8231 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
8232 ; AVX2-FAST-PERLANE-NEXT: # xmm11 = mem[2,2,3,3]
8233 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
8234 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1],ymm0[2,3,4,5],ymm11[6,7]
8235 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
8236 ; AVX2-FAST-PERLANE-NEXT: # xmm11 = mem[2,2,3,3]
8237 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,2,1]
8238 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0],ymm11[1],ymm0[2,3,4,5,6],ymm11[7]
8239 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8240 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8241 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8242 ; AVX2-FAST-PERLANE-NEXT: vpunpckldq {{.*#+}} ymm14 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[4],ymm12[4],ymm13[5],ymm12[5]
8243 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
8244 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
8245 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3],ymm0[4,5,6,7]
8246 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%r8), %ymm14
8247 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5],ymm0[6,7]
8248 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd 208(%r9), %ymm15
8249 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm15[5],ymm0[6,7]
8250 ; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} ymm12 = ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[6],ymm12[6],ymm13[7],ymm12[7]
8251 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm12 = ymm12[2,3,2,3,6,7,6,7]
8252 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
8253 ; AVX2-FAST-PERLANE-NEXT: # ymm12 = mem[2,3],ymm12[2,3]
8254 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm13 = ymm14[2,1,3,3,6,5,7,7]
8255 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
8256 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3,4,5],ymm13[6,7]
8257 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm13 = mem[0,2,2,3,4,6,6,7]
8258 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
8259 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4,5,6],ymm13[7]
8260 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8261 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm13 # 16-byte Folded Reload
8262 ; AVX2-FAST-PERLANE-NEXT: # xmm13 = xmm4[2],mem[2],xmm4[3],mem[3]
8263 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm13 = xmm13[2,3,2,3]
8264 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8265 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm13, %ymm4, %ymm13
8266 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, (%rsp), %xmm14 # 16-byte Folded Reload
8267 ; AVX2-FAST-PERLANE-NEXT: # xmm14 = mem[2,2,3,3]
8268 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,2,1]
8269 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3,4,5],ymm14[6,7]
8270 ; AVX2-FAST-PERLANE-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
8271 ; AVX2-FAST-PERLANE-NEXT: # xmm14 = mem[2,2,3,3]
8272 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,2,1]
8273 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4,5,6],ymm14[7]
8274 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8275 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
8276 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
8277 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8278 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8279 ; AVX2-FAST-PERLANE-NEXT: vunpcklps {{.*#+}} ymm15 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
8280 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,2]
8281 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[2,1,2,3]
8282 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
8283 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%r8), %ymm15
8284 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
8285 ; AVX2-FAST-PERLANE-NEXT: vbroadcastss 240(%r9), %ymm4
8286 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm14[0,1,2,3,4],ymm4[5],ymm14[6,7]
8287 ; AVX2-FAST-PERLANE-NEXT: vunpckhps {{.*#+}} ymm14 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
8288 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,2,3,6,7,6,7]
8289 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
8290 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = mem[2,3],ymm14[2,3]
8291 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,1,3,3,6,5,7,7]
8292 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,1,2,3]
8293 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5],ymm15[6,7]
8294 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm15 = mem[0,2,2,3,4,6,6,7]
8295 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,1,2,3]
8296 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6],ymm15[7]
8297 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
8298 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm14, 1504(%rax)
8299 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 1440(%rax)
8300 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm13, 1408(%rax)
8301 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm12, 1312(%rax)
8302 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, 1248(%rax)
8303 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, 1216(%rax)
8304 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm10, 1120(%rax)
8305 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, 1056(%rax)
8306 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 1024(%rax)
8307 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm8, 928(%rax)
8308 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, 864(%rax)
8309 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, 832(%rax)
8310 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm6, 736(%rax)
8311 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8312 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 672(%rax)
8313 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8314 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 640(%rax)
8315 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8316 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 544(%rax)
8317 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8318 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 480(%rax)
8319 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8320 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 448(%rax)
8321 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8322 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 352(%rax)
8323 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8324 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 288(%rax)
8325 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8326 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rax)
8327 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8328 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rax)
8329 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8330 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 96(%rax)
8331 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8332 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rax)
8333 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8334 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1472(%rax)
8335 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8336 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1344(%rax)
8337 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8338 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1280(%rax)
8339 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8340 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1152(%rax)
8341 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8342 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1088(%rax)
8343 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8344 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 960(%rax)
8345 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8346 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 896(%rax)
8347 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8348 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 768(%rax)
8349 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8350 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 704(%rax)
8351 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8352 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 576(%rax)
8353 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8354 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 512(%rax)
8355 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8356 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 384(%rax)
8357 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8358 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 320(%rax)
8359 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8360 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 192(%rax)
8361 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8362 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%rax)
8363 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8364 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax)
8365 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8366 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1376(%rax)
8367 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8368 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 1184(%rax)
8369 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8370 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 992(%rax)
8371 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8372 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 800(%rax)
8373 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8374 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 608(%rax)
8375 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8376 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 416(%rax)
8377 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8378 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax)
8379 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8380 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax)
8381 ; AVX2-FAST-PERLANE-NEXT: addq $2504, %rsp # imm = 0x9C8
8382 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
8383 ; AVX2-FAST-PERLANE-NEXT:    retq
8384 ;
8385 ; AVX512F-SLOW-LABEL: store_i32_stride6_vf64:
8386 ; AVX512F-SLOW: # %bb.0:
8387 ; AVX512F-SLOW-NEXT: subq $456, %rsp # imm = 0x1C8
8388 ; AVX512F-SLOW-NEXT: vmovdqa64 128(%rdi), %zmm9
8389 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm30
8390 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rdi), %zmm11
8391 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rsi), %zmm10
8392 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rsi), %zmm8
8393 ; AVX512F-SLOW-NEXT: vmovdqa64 128(%rsi), %zmm4
8394 ; AVX512F-SLOW-NEXT: vmovdqa64 192(%rdx), %zmm15
8395 ; AVX512F-SLOW-NEXT: vmovdqa64 128(%rdx), %zmm28
8396 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rdx), %zmm17
8397 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rdx), %zmm24
8398 ; AVX512F-SLOW-NEXT: vmovdqa64 (%rcx), %zmm22
8399 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rcx), %zmm18
8400 ; AVX512F-SLOW-NEXT: vmovdqa64 128(%rcx), %zmm16
8401 ; AVX512F-SLOW-NEXT: vmovdqa64 192(%rcx), %zmm13
8402 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm29 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
8403 ; AVX512F-SLOW-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3]
8404 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm11, %zmm0
8405 ; AVX512F-SLOW-NEXT: vpermt2d %zmm10, %zmm29, %zmm0
8406 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm3
8407 ; AVX512F-SLOW-NEXT: vmovdqa (%rdx), %ymm14
8408 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
8409 ; AVX512F-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
8410 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm24, %zmm20
8411 ; AVX512F-SLOW-NEXT: vpermt2d %zmm22, %zmm0, %zmm20
8412 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
8413 ; AVX512F-SLOW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
8414 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm24, %zmm23
8415 ; AVX512F-SLOW-NEXT: vpermt2d %zmm22, %zmm1, %zmm23
8416 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
8417 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm24, %zmm5
8418 ; AVX512F-SLOW-NEXT: vpermt2d %zmm22, %zmm2, %zmm5
8419 ; AVX512F-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8420 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm17, %zmm25
8421 ; AVX512F-SLOW-NEXT: vpermt2d %zmm18, %zmm0, %zmm25
8422 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm17, %zmm26
8423 ; AVX512F-SLOW-NEXT: vpermt2d %zmm18, %zmm1, %zmm26
8424 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm17, %zmm5
8425 ; AVX512F-SLOW-NEXT: vpermt2d %zmm18, %zmm2, %zmm5
8426 ; AVX512F-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8427 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm28, %zmm5
8428 ; AVX512F-SLOW-NEXT: vpermt2d %zmm16, %zmm0, %zmm5
8429 ; AVX512F-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8430 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm5
8431 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm28, %zmm27
8432 ; AVX512F-SLOW-NEXT: vpermt2d %zmm16, %zmm1, %zmm27
8433 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm1, %zmm6
8434 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm28, %zmm0
8435 ; AVX512F-SLOW-NEXT: vpermt2d %zmm16, %zmm2, %zmm0
8436 ; AVX512F-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8437 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
8438 ; AVX512F-SLOW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
8439 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm24, %zmm19
8440 ; AVX512F-SLOW-NEXT: vpermt2d %zmm22, %zmm21, %zmm19
8441 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
8442 ; AVX512F-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
8443 ; AVX512F-SLOW-NEXT: vpermt2d %zmm22, %zmm0, %zmm24
8444 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm17, %zmm22
8445 ; AVX512F-SLOW-NEXT: vpermt2d %zmm18, %zmm21, %zmm22
8446 ; AVX512F-SLOW-NEXT: vpermt2d %zmm18, %zmm0, %zmm17
8447 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm28, %zmm18
8448 ; AVX512F-SLOW-NEXT: vpermt2d %zmm16, %zmm21, %zmm18
8449 ; AVX512F-SLOW-NEXT: vpermt2d %zmm16, %zmm0, %zmm28
8450 ; AVX512F-SLOW-NEXT: vmovdqa 64(%rdx), %ymm1
8451 ; AVX512F-SLOW-NEXT: vpermi2d %zmm13, %zmm15, %zmm5
8452 ; AVX512F-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8453 ; AVX512F-SLOW-NEXT: vpermi2d %zmm13, %zmm15, %zmm6
8454 ; AVX512F-SLOW-NEXT: vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
8455 ; AVX512F-SLOW-NEXT: vpermi2d %zmm13, %zmm15, %zmm2
8456 ; AVX512F-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8457 ; AVX512F-SLOW-NEXT: vpermi2d %zmm13, %zmm15, %zmm21
8458 ; AVX512F-SLOW-NEXT: vpermt2d %zmm13, %zmm0, %zmm15
8459 ; AVX512F-SLOW-NEXT: vmovdqa 128(%rdx), %ymm0
8460 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [3,11,0,8,7,15,4,12]
8461 ; AVX512F-SLOW-NEXT: vpermt2d (%rcx), %ymm2, %ymm14
8462 ; AVX512F-SLOW-NEXT: movb $36, %al
8463 ; AVX512F-SLOW-NEXT: kmovw %eax, %k1
8464 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm14[0,1,0,1,2,3,6,7]
8465 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm30, %zmm13
8466 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm29, %zmm13
8467 ; AVX512F-SLOW-NEXT: vpermt2d 64(%rcx), %ymm2, %ymm1
8468 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm13 {%k1} = zmm1[0,1,0,1,2,3,6,7]
8469 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm9, %zmm14
8470 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm4, %zmm1
8471 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm29, %zmm14
8472 ; AVX512F-SLOW-NEXT: vpermt2d 128(%rcx), %ymm2, %ymm0
8473 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm0[0,1,0,1,2,3,6,7]
8474 ; AVX512F-SLOW-NEXT: vmovdqa 192(%rdx), %ymm0
8475 ; AVX512F-SLOW-NEXT: vpermt2d 192(%rcx), %ymm2, %ymm0
8476 ; AVX512F-SLOW-NEXT: vmovdqa64 192(%rdi), %zmm12
8477 ; AVX512F-SLOW-NEXT: vmovdqa64 192(%rsi), %zmm7
8478 ; AVX512F-SLOW-NEXT: vpermi2d %zmm7, %zmm12, %zmm29
8479 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm0[0,1,0,1,2,3,6,7]
8480 ; AVX512F-SLOW-NEXT: vmovdqa64 (%r8), %zmm4
8481 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
8482 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm3
8483 ; AVX512F-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8484 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%r8), %zmm3
8485 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm13
8486 ; AVX512F-SLOW-NEXT: vmovdqa64 128(%r8), %zmm2
8487 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm14
8488 ; AVX512F-SLOW-NEXT: vmovdqa64 192(%r8), %zmm6
8489 ; AVX512F-SLOW-NEXT: vpermt2d %zmm6, %zmm0, %zmm29
8490 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm11, %zmm16
8491 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
8492 ; AVX512F-SLOW-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
8493 ; AVX512F-SLOW-NEXT: vpermt2d %zmm10, %zmm31, %zmm16
8494 ; AVX512F-SLOW-NEXT: movb $-110, %al
8495 ; AVX512F-SLOW-NEXT: kmovw %eax, %k2
8496 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm20, %zmm16 {%k2}
8497 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm11, %zmm20
8498 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
8499 ; AVX512F-SLOW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
8500 ; AVX512F-SLOW-NEXT: vpermt2d %zmm10, %zmm5, %zmm20
8501 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm23, %zmm20 {%k2}
8502 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm11, %zmm23
8503 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm0 = zmm11[2],zmm10[2],zmm11[3],zmm10[3],zmm11[6],zmm10[6],zmm11[7],zmm10[7],zmm11[10],zmm10[10],zmm11[11],zmm10[11],zmm11[14],zmm10[14],zmm11[15],zmm10[15]
8504 ; AVX512F-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8505 ; AVX512F-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
8506 ; AVX512F-SLOW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
8507 ; AVX512F-SLOW-NEXT: vpermt2d %zmm10, %zmm11, %zmm23
8508 ; AVX512F-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
8509 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm23 {%k1}
8510 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm30, %zmm10
8511 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm31, %zmm10
8512 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm25, %zmm10 {%k2}
8513 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm30, %zmm25
8514 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm5, %zmm25
8515 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm26, %zmm25 {%k2}
8516 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm30, %zmm26
8517 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm30 = zmm30[2],zmm8[2],zmm30[3],zmm8[3],zmm30[6],zmm8[6],zmm30[7],zmm8[7],zmm30[10],zmm8[10],zmm30[11],zmm8[11],zmm30[14],zmm8[14],zmm30[15],zmm8[15]
8518 ; AVX512F-SLOW-NEXT: vpermt2d %zmm8, %zmm11, %zmm26
8519 ; AVX512F-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
8520 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm26 {%k1}
8521 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm9, %zmm0
8522 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm9, %zmm8
8523 ; AVX512F-SLOW-NEXT: vpermt2d %zmm1, %zmm31, %zmm8
8524 ; AVX512F-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
8525 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm9, %zmm8 {%k2}
8526 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm9
8527 ; AVX512F-SLOW-NEXT: vpermt2d %zmm1, %zmm5, %zmm9
8528 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm27, %zmm9 {%k2}
8529 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, %zmm27
8530 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
8531 ; AVX512F-SLOW-NEXT: vpermt2d %zmm1, %zmm11, %zmm27
8532 ; AVX512F-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
8533 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm1, %zmm27 {%k1}
8534 ; AVX512F-SLOW-NEXT: vpermi2d %zmm7, %zmm12, %zmm31
8535 ; AVX512F-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
8536 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm1, %zmm31 {%k2}
8537 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
8538 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm16
8539 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm10
8540 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm8
8541 ; AVX512F-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm31
8542 ; AVX512F-SLOW-NEXT: vpermi2d %zmm7, %zmm12, %zmm5
8543 ; AVX512F-SLOW-NEXT: vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
8544 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm1, %zmm5 {%k2}
8545 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
8546 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm20
8547 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm25
8548 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm9
8549 ; AVX512F-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm5
8550 ; AVX512F-SLOW-NEXT: vpermi2d %zmm7, %zmm12, %zmm11
8551 ; AVX512F-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
8552 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm1, %zmm11 {%k1}
8553 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
8554 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm23
8555 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm26
8556 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm27
8557 ; AVX512F-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm11
8558 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %ymm1
8559 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
8560 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm19 {%k1} = zmm1[2,3,2,3,2,3,2,3]
8561 ; AVX512F-SLOW-NEXT: vmovdqa 64(%rdi), %ymm1
8562 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
8563 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm22 {%k1} = zmm1[2,3,2,3,2,3,2,3]
8564 ; AVX512F-SLOW-NEXT: vmovdqa 128(%rdi), %ymm1
8565 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
8566 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm1[2,3,2,3,2,3,2,3]
8567 ; AVX512F-SLOW-NEXT: vmovdqa 192(%rdi), %ymm1
8568 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
8569 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm1[2,3,2,3,2,3,2,3]
8570 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
8571 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm19
8572 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm22
8573 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm18
8574 ; AVX512F-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm21
8575 ; AVX512F-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
8576 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm1[6,7,6,7,6,7,6,7]
8577 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
8578 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm24
8579 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm30[6,7,6,7,6,7,6,7]
8580 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm17
8581 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm28 {%k1} = zmm0[6,7,6,7,6,7,6,7]
8582 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm28
8583 ; AVX512F-SLOW-NEXT: vmovdqa64 (%r9), %zmm2
8584 ; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm12[2],zmm7[2],zmm12[3],zmm7[3],zmm12[6],zmm7[6],zmm12[7],zmm7[7],zmm12[10],zmm7[10],zmm12[11],zmm7[11],zmm12[14],zmm7[14],zmm12[15],zmm7[15]
8585 ; AVX512F-SLOW-NEXT: vmovdqa64 64(%r9), %zmm4
8586 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm3[6,7,6,7,6,7,6,7]
8587 ; AVX512F-SLOW-NEXT: vmovdqa64 128(%r9), %zmm3
8588 ; AVX512F-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm15
8589 ; AVX512F-SLOW-NEXT: vmovdqa64 192(%r9), %zmm1
8590 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
8591 ; AVX512F-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
8592 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm6
8593 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm13
8594 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm14
8595 ; AVX512F-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm29
8596 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
8597 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm16
8598 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm10
8599 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm8
8600 ; AVX512F-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm31
8601 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
8602 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm20
8603 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm25
8604 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm9
8605 ; AVX512F-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm5
8606 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
8607 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm23
8608 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm26
8609 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm27
8610 ; AVX512F-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm11
8611 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
8612 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm19
8613 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm22
8614 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm18
8615 ; AVX512F-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm21
8616 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
8617 ; AVX512F-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm24
8618 ; AVX512F-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm17
8619 ; AVX512F-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm28
8620 ; AVX512F-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm15
8621 ; AVX512F-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
8622 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm15, 1472(%rax)
8623 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm11, 1408(%rax)
8624 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm5, 1344(%rax)
8625 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm31, 1152(%rax)
8626 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm28, 1088(%rax)
8627 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm27, 1024(%rax)
8628 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm9, 960(%rax)
8629 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm8, 768(%rax)
8630 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm17, 704(%rax)
8631 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm26, 640(%rax)
8632 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm25, 576(%rax)
8633 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm10, 384(%rax)
8634 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm24, 320(%rax)
8635 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm23, 256(%rax)
8636 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm20, 192(%rax)
8637 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm16, (%rax)
8638 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm21, 1280(%rax)
8639 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm29, 1216(%rax)
8640 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm18, 896(%rax)
8641 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm14, 832(%rax)
8642 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm22, 512(%rax)
8643 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm13, 448(%rax)
8644 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm19, 128(%rax)
8645 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm6, 64(%rax)
8646 ; AVX512F-SLOW-NEXT: addq $456, %rsp # imm = 0x1C8
8647 ; AVX512F-SLOW-NEXT: vzeroupper
8648 ; AVX512F-SLOW-NEXT:    retq
8649 ;
8650 ; AVX512F-FAST-LABEL: store_i32_stride6_vf64:
8651 ; AVX512F-FAST: # %bb.0:
8652 ; AVX512F-FAST-NEXT: subq $1160, %rsp # imm = 0x488
8653 ; AVX512F-FAST-NEXT: vmovdqa64 (%rdi), %zmm8
8654 ; AVX512F-FAST-NEXT: vmovdqa64 64(%rdi), %zmm5
8655 ; AVX512F-FAST-NEXT: vmovdqa64 128(%rdi), %zmm3
8656 ; AVX512F-FAST-NEXT: vmovdqa64 192(%rdi), %zmm2
8657 ; AVX512F-FAST-NEXT: vmovdqa64 (%rsi), %zmm0
8658 ; AVX512F-FAST-NEXT: vmovdqa64 64(%rsi), %zmm24
8659 ; AVX512F-FAST-NEXT: vmovdqa64 128(%rsi), %zmm29
8660 ; AVX512F-FAST-NEXT: vmovdqa64 192(%rsi), %zmm23
8661 ; AVX512F-FAST-NEXT: vmovdqa64 (%rdx), %zmm4
8662 ; AVX512F-FAST-NEXT: vmovdqa64 (%rcx), %zmm21
8663 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
8664 ; AVX512F-FAST-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
8665 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm4, %zmm6
8666 ; AVX512F-FAST-NEXT: vpermt2d %zmm21, %zmm20, %zmm6
8667 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8668 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
8669 ; AVX512F-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
8670 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm8, %zmm1
8671 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm1
8672 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm1, %zmm16
8673 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm9 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
8674 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
8675 ; AVX512F-FAST-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
8676 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm8, %zmm1
8677 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm10, %zmm1
8678 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm1, %zmm17
8679 ; AVX512F-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
8680 ; AVX512F-FAST-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8681 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm8, %zmm31
8682 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm8, %zmm25
8683 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm1, %zmm31
8684 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm1, %zmm8
8685 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
8686 ; AVX512F-FAST-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
8687 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm25, %zmm1
8688 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm11, %zmm1
8689 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm1, %zmm27
8690 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
8691 ; AVX512F-FAST-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
8692 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm25, %zmm1
8693 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm12, %zmm1
8694 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8695 ; AVX512F-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
8696 ; AVX512F-FAST-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
8697 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm30, %zmm25
8698 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
8699 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm7, %zmm0
8700 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8701 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
8702 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm10, %zmm0
8703 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8704 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
8705 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm8, %zmm0
8706 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8707 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
8708 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm11, %zmm0
8709 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8710 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
8711 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm12, %zmm0
8712 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8713 ; AVX512F-FAST-NEXT: vpermt2d %zmm24, %zmm30, %zmm5
8714 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8715 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm3, %zmm0
8716 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm3, %zmm1
8717 ; AVX512F-FAST-NEXT: vpermt2d %zmm29, %zmm7, %zmm1
8718 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8719 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm3, %zmm1
8720 ; AVX512F-FAST-NEXT: vpermt2d %zmm29, %zmm10, %zmm1
8721 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8722 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm3, %zmm1
8723 ; AVX512F-FAST-NEXT: vpermt2d %zmm29, %zmm8, %zmm1
8724 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8725 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm3, %zmm26
8726 ; AVX512F-FAST-NEXT: vpermt2d %zmm29, %zmm11, %zmm26
8727 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm3, %zmm28
8728 ; AVX512F-FAST-NEXT: vpermt2d %zmm29, %zmm12, %zmm28
8729 ; AVX512F-FAST-NEXT: vpermt2d %zmm29, %zmm30, %zmm0
8730 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8731 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm4, %zmm19
8732 ; AVX512F-FAST-NEXT: vpermt2d %zmm21, %zmm9, %zmm19
8733 ; AVX512F-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm7
8734 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8735 ; AVX512F-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm10
8736 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8737 ; AVX512F-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm8
8738 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8739 ; AVX512F-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm11
8740 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8741 ; AVX512F-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm12
8742 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8743 ; AVX512F-FAST-NEXT: vpermt2d %zmm23, %zmm30, %zmm2
8744 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8745 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm29 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
8746 ; AVX512F-FAST-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3]
8747 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm4, %zmm5
8748 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm4, %zmm23
8749 ; AVX512F-FAST-NEXT: vpermt2d %zmm21, %zmm29, %zmm23
8750 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
8751 ; AVX512F-FAST-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3]
8752 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm5, %zmm18
8753 ; AVX512F-FAST-NEXT: vpermt2d %zmm21, %zmm4, %zmm18
8754 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm8 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
8755 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm5, %zmm7
8756 ; AVX512F-FAST-NEXT: vpermt2d %zmm21, %zmm8, %zmm7
8757 ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
8758 ; AVX512F-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
8759 ; AVX512F-FAST-NEXT: vpermt2d %zmm21, %zmm2, %zmm5
8760 ; AVX512F-FAST-NEXT: vmovdqa64 64(%rdx), %zmm21
8761 ; AVX512F-FAST-NEXT: vmovdqa64 64(%rcx), %zmm0
8762 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm21, %zmm22
8763 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm20, %zmm22
8764 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm21, %zmm14
8765 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm9, %zmm24
8766 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm9, %zmm14
8767 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm21, %zmm30
8768 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm29, %zmm30
8769 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm21, %zmm10
8770 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm4, %zmm10
8771 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm21, %zmm6
8772 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm8, %zmm6
8773 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm21
8774 ; AVX512F-FAST-NEXT: vmovdqa64 128(%rdx), %zmm13
8775 ; AVX512F-FAST-NEXT: vmovdqa64 128(%rcx), %zmm0
8776 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm13, %zmm15
8777 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm20, %zmm15
8778 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm13, %zmm12
8779 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm9, %zmm12
8780 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm13, %zmm3
8781 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm29, %zmm3
8782 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm13, %zmm11
8783 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm4, %zmm11
8784 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm13, %zmm9
8785 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm8, %zmm9
8786 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
8787 ; AVX512F-FAST-NEXT: vmovdqa64 192(%rdx), %zmm1
8788 ; AVX512F-FAST-NEXT: vmovdqa64 192(%rcx), %zmm0
8789 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm20
8790 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm24
8791 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm29
8792 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm4
8793 ; AVX512F-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
8794 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm1
8795 ; AVX512F-FAST-NEXT: movb $-110, %al
8796 ; AVX512F-FAST-NEXT: kmovw %eax, %k2
8797 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
8798 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm16 {%k2}
8799 ; AVX512F-FAST-NEXT: movb $36, %al
8800 ; AVX512F-FAST-NEXT: kmovw %eax, %k1
8801 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm19, %zmm17 {%k1}
8802 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm31, %zmm23 {%k1}
8803 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm18, %zmm27 {%k2}
8804 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
8805 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm19 {%k1}
8806 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm25, %zmm5 {%k1}
8807 ; AVX512F-FAST-NEXT: vmovdqa64 (%r8), %zmm0
8808 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
8809 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm16
8810 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm16, (%rsp) # 64-byte Spill
8811 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
8812 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm17
8813 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8814 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm18 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
8815 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm18, %zmm23
8816 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm25 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
8817 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm25, %zmm27
8818 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8819 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm16 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
8820 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm16, %zmm19
8821 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm16, %zmm17
8822 ; AVX512F-FAST-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8823 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm31 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
8824 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm31, %zmm5
8825 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
8826 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm22, %zmm27 {%k2}
8827 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
8828 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm14, %zmm22 {%k1}
8829 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
8830 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1}
8831 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
8832 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm10, %zmm16 {%k2}
8833 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
8834 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm6, %zmm10 {%k1}
8835 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
8836 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm21 {%k1}
8837 ; AVX512F-FAST-NEXT: vmovdqa64 64(%r8), %zmm0
8838 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm27
8839 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm22
8840 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm18, %zmm30
8841 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm25, %zmm16
8842 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm16, %zmm19
8843 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm17, %zmm10
8844 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm17, %zmm14
8845 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm10, %zmm17
8846 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm31, %zmm21
8847 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
8848 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm15, %zmm16 {%k2}
8849 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
8850 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm12, %zmm15 {%k1}
8851 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
8852 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm3 {%k1}
8853 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm11, %zmm26 {%k2}
8854 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm9, %zmm28 {%k1}
8855 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
8856 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1}
8857 ; AVX512F-FAST-NEXT: vmovdqa64 128(%r8), %zmm0
8858 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm16
8859 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm15
8860 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm18, %zmm3
8861 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm25, %zmm26
8862 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm14, %zmm28
8863 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm31, %zmm13
8864 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
8865 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm20, %zmm6 {%k2}
8866 ; AVX512F-FAST-NEXT: vmovdqa64 192(%r8), %zmm0
8867 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm6
8868 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm6, %zmm9
8869 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
8870 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm24, %zmm2 {%k1}
8871 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm2
8872 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm2, %zmm10
8873 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
8874 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm2, %zmm29 {%k1}
8875 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm18, %zmm29
8876 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
8877 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm4, %zmm6 {%k2}
8878 ; AVX512F-FAST-NEXT: vmovdqa64 (%r9), %zmm2
8879 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm25, %zmm6
8880 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm6, %zmm12
8881 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
8882 ; AVX512F-FAST-NEXT: vmovdqu64 (%rsp), %zmm18 # 64-byte Reload
8883 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm4, %zmm18
8884 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
8885 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm8, %zmm6 {%k1}
8886 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
8887 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
8888 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm11, %zmm20
8889 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm14, %zmm6
8890 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm6, %zmm14
8891 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
8892 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm6, %zmm23
8893 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
8894 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, %zmm1 {%k1}
8895 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
8896 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
8897 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm7, %zmm24
8898 ; AVX512F-FAST-NEXT: vpermt2d %zmm0, %zmm31, %zmm1
8899 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
8900 ; AVX512F-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
8901 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm0, %zmm25
8902 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
8903 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm8, %zmm5
8904 ; AVX512F-FAST-NEXT: vmovdqa64 64(%r9), %zmm2
8905 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm4, %zmm27
8906 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm11, %zmm22
8907 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm6, %zmm30
8908 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm7, %zmm19
8909 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm0, %zmm17
8910 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm8, %zmm21
8911 ; AVX512F-FAST-NEXT: vmovdqa64 128(%r9), %zmm2
8912 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm4, %zmm16
8913 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm11, %zmm15
8914 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm6, %zmm3
8915 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm7, %zmm26
8916 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm0, %zmm28
8917 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm8, %zmm13
8918 ; AVX512F-FAST-NEXT: vmovdqa64 192(%r9), %zmm2
8919 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm4, %zmm9
8920 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm11, %zmm10
8921 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm6, %zmm29
8922 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm7, %zmm12
8923 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm0, %zmm14
8924 ; AVX512F-FAST-NEXT: vpermt2d %zmm2, %zmm8, %zmm1
8925 ; AVX512F-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
8926 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm1, 1472(%rax)
8927 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm14, 1408(%rax)
8928 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm12, 1344(%rax)
8929 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm29, 1280(%rax)
8930 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm10, 1216(%rax)
8931 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm9, 1152(%rax)
8932 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm13, 1088(%rax)
8933 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm28, 1024(%rax)
8934 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm26, 960(%rax)
8935 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm3, 896(%rax)
8936 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm15, 832(%rax)
8937 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm16, 768(%rax)
8938 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm21, 704(%rax)
8939 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm17, 640(%rax)
8940 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm19, 576(%rax)
8941 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm30, 512(%rax)
8942 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm22, 448(%rax)
8943 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm27, 384(%rax)
8944 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm5, 320(%rax)
8945 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm25, 256(%rax)
8946 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm24, 192(%rax)
8947 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm23, 128(%rax)
8948 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm20, 64(%rax)
8949 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm18, (%rax)
8950 ; AVX512F-FAST-NEXT: addq $1160, %rsp # imm = 0x488
8951 ; AVX512F-FAST-NEXT: vzeroupper
8952 ; AVX512F-FAST-NEXT:    retq
8953 ;
8954 ; AVX512BW-SLOW-LABEL: store_i32_stride6_vf64:
8955 ; AVX512BW-SLOW: # %bb.0:
8956 ; AVX512BW-SLOW-NEXT: subq $456, %rsp # imm = 0x1C8
8957 ; AVX512BW-SLOW-NEXT: vmovdqa64 128(%rdi), %zmm9
8958 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm30
8959 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm11
8960 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rsi), %zmm10
8961 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rsi), %zmm8
8962 ; AVX512BW-SLOW-NEXT: vmovdqa64 128(%rsi), %zmm4
8963 ; AVX512BW-SLOW-NEXT: vmovdqa64 192(%rdx), %zmm15
8964 ; AVX512BW-SLOW-NEXT: vmovdqa64 128(%rdx), %zmm28
8965 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rdx), %zmm17
8966 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdx), %zmm24
8967 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rcx), %zmm22
8968 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rcx), %zmm18
8969 ; AVX512BW-SLOW-NEXT: vmovdqa64 128(%rcx), %zmm16
8970 ; AVX512BW-SLOW-NEXT: vmovdqa64 192(%rcx), %zmm13
8971 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm29 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
8972 ; AVX512BW-SLOW-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3]
8973 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm11, %zmm0
8974 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm10, %zmm29, %zmm0
8975 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm3
8976 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdx), %ymm14
8977 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
8978 ; AVX512BW-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
8979 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm24, %zmm20
8980 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm22, %zmm0, %zmm20
8981 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
8982 ; AVX512BW-SLOW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
8983 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm24, %zmm23
8984 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm22, %zmm1, %zmm23
8985 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
8986 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm24, %zmm5
8987 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm22, %zmm2, %zmm5
8988 ; AVX512BW-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8989 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm17, %zmm25
8990 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm18, %zmm0, %zmm25
8991 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm17, %zmm26
8992 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm18, %zmm1, %zmm26
8993 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm17, %zmm5
8994 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm18, %zmm2, %zmm5
8995 ; AVX512BW-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8996 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm28, %zmm5
8997 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm16, %zmm0, %zmm5
8998 ; AVX512BW-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8999 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm5
9000 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm28, %zmm27
9001 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm16, %zmm1, %zmm27
9002 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm6
9003 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm28, %zmm0
9004 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm16, %zmm2, %zmm0
9005 ; AVX512BW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9006 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
9007 ; AVX512BW-SLOW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
9008 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm24, %zmm19
9009 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm22, %zmm21, %zmm19
9010 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
9011 ; AVX512BW-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
9012 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm22, %zmm0, %zmm24
9013 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm17, %zmm22
9014 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm18, %zmm21, %zmm22
9015 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm18, %zmm0, %zmm17
9016 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm28, %zmm18
9017 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm16, %zmm21, %zmm18
9018 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm16, %zmm0, %zmm28
9019 ; AVX512BW-SLOW-NEXT: vmovdqa 64(%rdx), %ymm1
9020 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm13, %zmm15, %zmm5
9021 ; AVX512BW-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9022 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm13, %zmm15, %zmm6
9023 ; AVX512BW-SLOW-NEXT: vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
9024 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm13, %zmm15, %zmm2
9025 ; AVX512BW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9026 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm13, %zmm15, %zmm21
9027 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm13, %zmm0, %zmm15
9028 ; AVX512BW-SLOW-NEXT: vmovdqa 128(%rdx), %ymm0
9029 ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [3,11,0,8,7,15,4,12]
9030 ; AVX512BW-SLOW-NEXT: vpermt2d (%rcx), %ymm2, %ymm14
9031 ; AVX512BW-SLOW-NEXT: movb $36, %al
9032 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1
9033 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm14[0,1,0,1,2,3,6,7]
9034 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm30, %zmm13
9035 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm29, %zmm13
9036 ; AVX512BW-SLOW-NEXT: vpermt2d 64(%rcx), %ymm2, %ymm1
9037 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm13 {%k1} = zmm1[0,1,0,1,2,3,6,7]
9038 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm9, %zmm14
9039 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm4, %zmm1
9040 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm29, %zmm14
9041 ; AVX512BW-SLOW-NEXT: vpermt2d 128(%rcx), %ymm2, %ymm0
9042 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm0[0,1,0,1,2,3,6,7]
9043 ; AVX512BW-SLOW-NEXT: vmovdqa 192(%rdx), %ymm0
9044 ; AVX512BW-SLOW-NEXT: vpermt2d 192(%rcx), %ymm2, %ymm0
9045 ; AVX512BW-SLOW-NEXT: vmovdqa64 192(%rdi), %zmm12
9046 ; AVX512BW-SLOW-NEXT: vmovdqa64 192(%rsi), %zmm7
9047 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm7, %zmm12, %zmm29
9048 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm0[0,1,0,1,2,3,6,7]
9049 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%r8), %zmm4
9050 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
9051 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm3
9052 ; AVX512BW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9053 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%r8), %zmm3
9054 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm13
9055 ; AVX512BW-SLOW-NEXT: vmovdqa64 128(%r8), %zmm2
9056 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm14
9057 ; AVX512BW-SLOW-NEXT: vmovdqa64 192(%r8), %zmm6
9058 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm6, %zmm0, %zmm29
9059 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm11, %zmm16
9060 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
9061 ; AVX512BW-SLOW-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3]
9062 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm10, %zmm31, %zmm16
9063 ; AVX512BW-SLOW-NEXT: movb $-110, %al
9064 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k2
9065 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm20, %zmm16 {%k2}
9066 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm11, %zmm20
9067 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
9068 ; AVX512BW-SLOW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
9069 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm10, %zmm5, %zmm20
9070 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm23, %zmm20 {%k2}
9071 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm11, %zmm23
9072 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm0 = zmm11[2],zmm10[2],zmm11[3],zmm10[3],zmm11[6],zmm10[6],zmm11[7],zmm10[7],zmm11[10],zmm10[10],zmm11[11],zmm10[11],zmm11[14],zmm10[14],zmm11[15],zmm10[15]
9073 ; AVX512BW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9074 ; AVX512BW-SLOW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
9075 ; AVX512BW-SLOW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
9076 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm10, %zmm11, %zmm23
9077 ; AVX512BW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9078 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm23 {%k1}
9079 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm30, %zmm10
9080 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm31, %zmm10
9081 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm25, %zmm10 {%k2}
9082 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm30, %zmm25
9083 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm5, %zmm25
9084 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm26, %zmm25 {%k2}
9085 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm30, %zmm26
9086 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm30 = zmm30[2],zmm8[2],zmm30[3],zmm8[3],zmm30[6],zmm8[6],zmm30[7],zmm8[7],zmm30[10],zmm8[10],zmm30[11],zmm8[11],zmm30[14],zmm8[14],zmm30[15],zmm8[15]
9087 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm8, %zmm11, %zmm26
9088 ; AVX512BW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9089 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm26 {%k1}
9090 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm9, %zmm0
9091 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm9, %zmm8
9092 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm1, %zmm31, %zmm8
9093 ; AVX512BW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
9094 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm9, %zmm8 {%k2}
9095 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm9
9096 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm1, %zmm5, %zmm9
9097 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm27, %zmm9 {%k2}
9098 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm27
9099 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
9100 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm1, %zmm11, %zmm27
9101 ; AVX512BW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
9102 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm27 {%k1}
9103 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm7, %zmm12, %zmm31
9104 ; AVX512BW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
9105 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm31 {%k2}
9106 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
9107 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm16
9108 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm10
9109 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm8
9110 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm31
9111 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm7, %zmm12, %zmm5
9112 ; AVX512BW-SLOW-NEXT: vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
9113 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm5 {%k2}
9114 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
9115 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm20
9116 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm25
9117 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm9
9118 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm5
9119 ; AVX512BW-SLOW-NEXT: vpermi2d %zmm7, %zmm12, %zmm11
9120 ; AVX512BW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
9121 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm11 {%k1}
9122 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
9123 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm23
9124 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm26
9125 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm27
9126 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm11
9127 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm1
9128 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
9129 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm19 {%k1} = zmm1[2,3,2,3,2,3,2,3]
9130 ; AVX512BW-SLOW-NEXT: vmovdqa 64(%rdi), %ymm1
9131 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
9132 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm22 {%k1} = zmm1[2,3,2,3,2,3,2,3]
9133 ; AVX512BW-SLOW-NEXT: vmovdqa 128(%rdi), %ymm1
9134 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
9135 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm1[2,3,2,3,2,3,2,3]
9136 ; AVX512BW-SLOW-NEXT: vmovdqa 192(%rdi), %ymm1
9137 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
9138 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm1[2,3,2,3,2,3,2,3]
9139 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
9140 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm19
9141 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm22
9142 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm18
9143 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm21
9144 ; AVX512BW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
9145 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm1[6,7,6,7,6,7,6,7]
9146 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
9147 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm1, %zmm24
9148 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm30[6,7,6,7,6,7,6,7]
9149 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm1, %zmm17
9150 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm28 {%k1} = zmm0[6,7,6,7,6,7,6,7]
9151 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm1, %zmm28
9152 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%r9), %zmm2
9153 ; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm12[2],zmm7[2],zmm12[3],zmm7[3],zmm12[6],zmm7[6],zmm12[7],zmm7[7],zmm12[10],zmm7[10],zmm12[11],zmm7[11],zmm12[14],zmm7[14],zmm12[15],zmm7[15]
9154 ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%r9), %zmm4
9155 ; AVX512BW-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm3[6,7,6,7,6,7,6,7]
9156 ; AVX512BW-SLOW-NEXT: vmovdqa64 128(%r9), %zmm3
9157 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm6, %zmm1, %zmm15
9158 ; AVX512BW-SLOW-NEXT: vmovdqa64 192(%r9), %zmm1
9159 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
9160 ; AVX512BW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
9161 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm6
9162 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm13
9163 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm14
9164 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm29
9165 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
9166 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm16
9167 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm10
9168 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm8
9169 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm31
9170 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
9171 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm20
9172 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm25
9173 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm9
9174 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm5
9175 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
9176 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm23
9177 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm26
9178 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm27
9179 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm11
9180 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
9181 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm19
9182 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm22
9183 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm18
9184 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm21
9185 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
9186 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm2, %zmm0, %zmm24
9187 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm4, %zmm0, %zmm17
9188 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm3, %zmm0, %zmm28
9189 ; AVX512BW-SLOW-NEXT: vpermt2d %zmm1, %zmm0, %zmm15
9190 ; AVX512BW-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
9191 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm15, 1472(%rax)
9192 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm11, 1408(%rax)
9193 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm5, 1344(%rax)
9194 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm31, 1152(%rax)
9195 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm28, 1088(%rax)
9196 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm27, 1024(%rax)
9197 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm9, 960(%rax)
9198 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm8, 768(%rax)
9199 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm17, 704(%rax)
9200 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm26, 640(%rax)
9201 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm25, 576(%rax)
9202 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm10, 384(%rax)
9203 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm24, 320(%rax)
9204 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm23, 256(%rax)
9205 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm20, 192(%rax)
9206 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm16, (%rax)
9207 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm21, 1280(%rax)
9208 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm29, 1216(%rax)
9209 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm18, 896(%rax)
9210 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm14, 832(%rax)
9211 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm22, 512(%rax)
9212 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm13, 448(%rax)
9213 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm19, 128(%rax)
9214 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm6, 64(%rax)
9215 ; AVX512BW-SLOW-NEXT: addq $456, %rsp # imm = 0x1C8
9216 ; AVX512BW-SLOW-NEXT: vzeroupper
9217 ; AVX512BW-SLOW-NEXT: retq
9218 ;
9219 ; AVX512BW-FAST-LABEL: store_i32_stride6_vf64:
9220 ; AVX512BW-FAST: # %bb.0:
9221 ; AVX512BW-FAST-NEXT: subq $1160, %rsp # imm = 0x488
9222 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm8
9223 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%rdi), %zmm5
9224 ; AVX512BW-FAST-NEXT: vmovdqa64 128(%rdi), %zmm3
9225 ; AVX512BW-FAST-NEXT: vmovdqa64 192(%rdi), %zmm2
9226 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rsi), %zmm0
9227 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%rsi), %zmm24
9228 ; AVX512BW-FAST-NEXT: vmovdqa64 128(%rsi), %zmm29
9229 ; AVX512BW-FAST-NEXT: vmovdqa64 192(%rsi), %zmm23
9230 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdx), %zmm4
9231 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rcx), %zmm21
9232 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
9233 ; AVX512BW-FAST-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
9234 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm4, %zmm6
9235 ; AVX512BW-FAST-NEXT: vpermt2d %zmm21, %zmm20, %zmm6
9236 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9237 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
9238 ; AVX512BW-FAST-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
9239 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm8, %zmm1
9240 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm1
9241 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm1, %zmm16
9242 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm9 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
9243 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
9244 ; AVX512BW-FAST-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
9245 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm8, %zmm1
9246 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm10, %zmm1
9247 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm1, %zmm17
9248 ; AVX512BW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
9249 ; AVX512BW-FAST-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
9250 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm8, %zmm31
9251 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm8, %zmm25
9252 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm1, %zmm31
9253 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm1, %zmm8
9254 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
9255 ; AVX512BW-FAST-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
9256 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm25, %zmm1
9257 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm11, %zmm1
9258 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm1, %zmm27
9259 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
9260 ; AVX512BW-FAST-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
9261 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm25, %zmm1
9262 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm12, %zmm1
9263 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9264 ; AVX512BW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm30 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
9265 ; AVX512BW-FAST-NEXT: # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
9266 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm30, %zmm25
9267 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
9268 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm7, %zmm0
9269 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9270 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
9271 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm10, %zmm0
9272 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9273 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
9274 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm8, %zmm0
9275 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9276 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
9277 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm11, %zmm0
9278 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9279 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, %zmm0
9280 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm12, %zmm0
9281 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9282 ; AVX512BW-FAST-NEXT: vpermt2d %zmm24, %zmm30, %zmm5
9283 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9284 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm3, %zmm0
9285 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm3, %zmm1
9286 ; AVX512BW-FAST-NEXT: vpermt2d %zmm29, %zmm7, %zmm1
9287 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9288 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm3, %zmm1
9289 ; AVX512BW-FAST-NEXT: vpermt2d %zmm29, %zmm10, %zmm1
9290 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9291 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm3, %zmm1
9292 ; AVX512BW-FAST-NEXT: vpermt2d %zmm29, %zmm8, %zmm1
9293 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9294 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm3, %zmm26
9295 ; AVX512BW-FAST-NEXT: vpermt2d %zmm29, %zmm11, %zmm26
9296 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm3, %zmm28
9297 ; AVX512BW-FAST-NEXT: vpermt2d %zmm29, %zmm12, %zmm28
9298 ; AVX512BW-FAST-NEXT: vpermt2d %zmm29, %zmm30, %zmm0
9299 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9300 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm4, %zmm19
9301 ; AVX512BW-FAST-NEXT: vpermt2d %zmm21, %zmm9, %zmm19
9302 ; AVX512BW-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm7
9303 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9304 ; AVX512BW-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm10
9305 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9306 ; AVX512BW-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm8
9307 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9308 ; AVX512BW-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm11
9309 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9310 ; AVX512BW-FAST-NEXT: vpermi2d %zmm23, %zmm2, %zmm12
9311 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9312 ; AVX512BW-FAST-NEXT: vpermt2d %zmm23, %zmm30, %zmm2
9313 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9314 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm29 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
9315 ; AVX512BW-FAST-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3]
9316 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm4, %zmm5
9317 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm4, %zmm23
9318 ; AVX512BW-FAST-NEXT: vpermt2d %zmm21, %zmm29, %zmm23
9319 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
9320 ; AVX512BW-FAST-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3]
9321 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, %zmm18
9322 ; AVX512BW-FAST-NEXT: vpermt2d %zmm21, %zmm4, %zmm18
9323 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm8 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
9324 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, %zmm7
9325 ; AVX512BW-FAST-NEXT: vpermt2d %zmm21, %zmm8, %zmm7
9326 ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
9327 ; AVX512BW-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
9328 ; AVX512BW-FAST-NEXT: vpermt2d %zmm21, %zmm2, %zmm5
9329 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%rdx), %zmm21
9330 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%rcx), %zmm0
9331 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm21, %zmm22
9332 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm20, %zmm22
9333 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm21, %zmm14
9334 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm9, %zmm24
9335 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm9, %zmm14
9336 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm21, %zmm30
9337 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm29, %zmm30
9338 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm21, %zmm10
9339 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm4, %zmm10
9340 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm21, %zmm6
9341 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm8, %zmm6
9342 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm21
9343 ; AVX512BW-FAST-NEXT: vmovdqa64 128(%rdx), %zmm13
9344 ; AVX512BW-FAST-NEXT: vmovdqa64 128(%rcx), %zmm0
9345 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm13, %zmm15
9346 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm20, %zmm15
9347 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm13, %zmm12
9348 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm9, %zmm12
9349 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm13, %zmm3
9350 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm29, %zmm3
9351 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm13, %zmm11
9352 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm4, %zmm11
9353 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm13, %zmm9
9354 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm8, %zmm9
9355 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm13
9356 ; AVX512BW-FAST-NEXT: vmovdqa64 192(%rdx), %zmm1
9357 ; AVX512BW-FAST-NEXT: vmovdqa64 192(%rcx), %zmm0
9358 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm20
9359 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm24
9360 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm29
9361 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm4
9362 ; AVX512BW-FAST-NEXT: vpermi2d %zmm0, %zmm1, %zmm8
9363 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm1
9364 ; AVX512BW-FAST-NEXT: movb $-110, %al
9365 ; AVX512BW-FAST-NEXT: kmovd %eax, %k2
9366 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9367 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm16 {%k2}
9368 ; AVX512BW-FAST-NEXT: movb $36, %al
9369 ; AVX512BW-FAST-NEXT: kmovd %eax, %k1
9370 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm19, %zmm17 {%k1}
9371 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm31, %zmm23 {%k1}
9372 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm18, %zmm27 {%k2}
9373 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
9374 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm19 {%k1}
9375 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm25, %zmm5 {%k1}
9376 ; AVX512BW-FAST-NEXT: vmovdqa64 (%r8), %zmm0
9377 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
9378 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm16
9379 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm16, (%rsp) # 64-byte Spill
9380 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
9381 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm17
9382 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9383 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm18 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
9384 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm18, %zmm23
9385 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm25 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
9386 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm25, %zmm27
9387 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9388 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm16 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
9389 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm16, %zmm19
9390 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm16, %zmm17
9391 ; AVX512BW-FAST-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9392 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm31 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
9393 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm31, %zmm5
9394 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
9395 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm22, %zmm27 {%k2}
9396 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
9397 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm14, %zmm22 {%k1}
9398 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9399 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1}
9400 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
9401 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm10, %zmm16 {%k2}
9402 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
9403 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm6, %zmm10 {%k1}
9404 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9405 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm21 {%k1}
9406 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%r8), %zmm0
9407 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm27
9408 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm22
9409 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm18, %zmm30
9410 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm25, %zmm16
9411 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm16, %zmm19
9412 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm17, %zmm10
9413 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm17, %zmm14
9414 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm10, %zmm17
9415 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm31, %zmm21
9416 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
9417 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm15, %zmm16 {%k2}
9418 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
9419 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm12, %zmm15 {%k1}
9420 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9421 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm3 {%k1}
9422 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm11, %zmm26 {%k2}
9423 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm9, %zmm28 {%k1}
9424 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
9425 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1}
9426 ; AVX512BW-FAST-NEXT: vmovdqa64 128(%r8), %zmm0
9427 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm16
9428 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm15
9429 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm18, %zmm3
9430 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm25, %zmm26
9431 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm14, %zmm28
9432 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm31, %zmm13
9433 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
9434 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm20, %zmm6 {%k2}
9435 ; AVX512BW-FAST-NEXT: vmovdqa64 192(%r8), %zmm0
9436 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm2, %zmm6
9437 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm6, %zmm9
9438 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
9439 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm24, %zmm2 {%k1}
9440 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm7, %zmm2
9441 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm2, %zmm10
9442 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
9443 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm2, %zmm29 {%k1}
9444 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm18, %zmm29
9445 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
9446 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm4, %zmm6 {%k2}
9447 ; AVX512BW-FAST-NEXT: vmovdqa64 (%r9), %zmm2
9448 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm25, %zmm6
9449 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm6, %zmm12
9450 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
9451 ; AVX512BW-FAST-NEXT: vmovdqu64 (%rsp), %zmm18 # 64-byte Reload
9452 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm4, %zmm18
9453 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
9454 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm8, %zmm6 {%k1}
9455 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
9456 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
9457 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm11, %zmm20
9458 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm14, %zmm6
9459 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm6, %zmm14
9460 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
9461 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm6, %zmm23
9462 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
9463 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, %zmm1 {%k1}
9464 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
9465 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
9466 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm7, %zmm24
9467 ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm31, %zmm1
9468 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
9469 ; AVX512BW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
9470 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm0, %zmm25
9471 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
9472 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm8, %zmm5
9473 ; AVX512BW-FAST-NEXT: vmovdqa64 64(%r9), %zmm2
9474 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm4, %zmm27
9475 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm11, %zmm22
9476 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm6, %zmm30
9477 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm7, %zmm19
9478 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm0, %zmm17
9479 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm8, %zmm21
9480 ; AVX512BW-FAST-NEXT: vmovdqa64 128(%r9), %zmm2
9481 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm4, %zmm16
9482 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm11, %zmm15
9483 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm6, %zmm3
9484 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm7, %zmm26
9485 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm0, %zmm28
9486 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm8, %zmm13
9487 ; AVX512BW-FAST-NEXT: vmovdqa64 192(%r9), %zmm2
9488 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm4, %zmm9
9489 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm11, %zmm10
9490 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm6, %zmm29
9491 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm7, %zmm12
9492 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm0, %zmm14
9493 ; AVX512BW-FAST-NEXT: vpermt2d %zmm2, %zmm8, %zmm1
9494 ; AVX512BW-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
9495 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm1, 1472(%rax)
9496 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm14, 1408(%rax)
9497 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm12, 1344(%rax)
9498 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm29, 1280(%rax)
9499 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm10, 1216(%rax)
9500 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm9, 1152(%rax)
9501 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm13, 1088(%rax)
9502 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm28, 1024(%rax)
9503 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm26, 960(%rax)
9504 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm3, 896(%rax)
9505 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm15, 832(%rax)
9506 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm16, 768(%rax)
9507 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm21, 704(%rax)
9508 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm17, 640(%rax)
9509 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm19, 576(%rax)
9510 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm30, 512(%rax)
9511 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm22, 448(%rax)
9512 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm27, 384(%rax)
9513 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, 320(%rax)
9514 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm25, 256(%rax)
9515 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm24, 192(%rax)
9516 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm23, 128(%rax)
9517 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm20, 64(%rax)
9518 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm18, (%rax)
9519 ; AVX512BW-FAST-NEXT: addq $1160, %rsp # imm = 0x488
9520 ; AVX512BW-FAST-NEXT: vzeroupper
9521 ; AVX512BW-FAST-NEXT: retq
9522 %in.vec0 = load <64 x i32>, ptr %in.vecptr0, align 64
9523 %in.vec1 = load <64 x i32>, ptr %in.vecptr1, align 64
9524 %in.vec2 = load <64 x i32>, ptr %in.vecptr2, align 64
9525 %in.vec3 = load <64 x i32>, ptr %in.vecptr3, align 64
9526 %in.vec4 = load <64 x i32>, ptr %in.vecptr4, align 64
9527 %in.vec5 = load <64 x i32>, ptr %in.vecptr5, align 64
9528 %1 = shufflevector <64 x i32> %in.vec0, <64 x i32> %in.vec1, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
9529 %2 = shufflevector <64 x i32> %in.vec2, <64 x i32> %in.vec3, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
9530 %3 = shufflevector <64 x i32> %in.vec4, <64 x i32> %in.vec5, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
9531 %4 = shufflevector <128 x i32> %1, <128 x i32> %2, <256 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255>
9532 %5 = shufflevector <128 x i32> %3, <128 x i32> poison, <256 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
9533 %6 = shufflevector <256 x i32> %4, <256 x i32> %5, <384 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255, i32 256, i32 257, i32 258, i32 259, i32 260, i32 261, i32 262, i32 263, i32 264, i32 265, i32 266, i32 267, i32 268, i32 269, i32 270, i32 271, i32 272, i32 273, i32 274, i32 275, i32 276, i32 277, i32 278, i32 279, i32 280, i32 281, i32 282, i32 283, i32 284, i32 285, i32 286, i32 287, i32 288, i32 289, i32 290, i32 291, i32 292, i32 293, i32 294, i32 295, i32 296, i32 297, i32 298, i32 299, i32 300, i32 301, i32 302, i32 303, i32 304, i32 305, i32 306, i32 307, i32 308, i32 309, i32 310, i32 311, i32 312, i32 313, i32 314, i32 315, i32 316, i32 317, i32 318, i32 319, i32 320, i32 321, i32 322, i32 323, i32 324, i32 325, i32 326, i32 327, i32 328, i32 329, i32 330, i32 331, i32 332, i32 333, i32 334, i32 335, i32 336, i32 337, i32 338, i32 339, i32 340, i32 341, i32 342, i32 343, i32 344, i32 345, i32 346, i32 347, i32 348, i32 349, i32 350, i32 351, i32 352, i32 353, i32 354, i32 355, i32 356, i32 357, i32 358, i32 359, i32 360, i32 361, i32 362, i32 363, i32 364, i32 365, i32 366, i32 367, i32 368, i32 369, i32 370, i32 371, i32 372, i32 373, i32 374, i32 375, i32 376, i32 377, i32 378, i32 379, i32 380, i32 381, i32 382, i32 383>
9534 %interleaved.vec = shufflevector <384 x i32> %6, <384 x i32> poison, <384 x i32> <i32 0, i32 64, i32 128, i32 192, i32 256, i32 320, i32 1, i32 65, i32 129, i32 193, i32 257, i32 321, i32 2, i32 66, i32 130, i32 194, i32 258, i32 322, i32 3, i32 67, i32 131, i32 195, i32 259, i32 323, i32 4, i32 68, i32 132, i32 196, i32 260, i32 324, i32 5, i32 69, i32 133, i32 197, i32 261, i32 325, i32 6, i32 70, i32 134, i32 198, i32 262, i32 326, i32 7, i32 71, i32 135, i32 199, i32 263, i32 327, i32 8, i32 72, i32 136, i32 200, i32 264, i32 328, i32 9, i32 73, i32 137, i32 201, i32 265, i32 329, i32 10, i32 74, i32 138, i32 202, i32 266, i32 330, i32 11, i32 75, i32 139, i32 203, i32 267, i32 331, i32 12, i32 76, i32 140, i32 204, i32 268, i32 332, i32 13, i32 77, i32 141, i32 205, i32 269, i32 333, i32 14, i32 78, i32 142, i32 206, i32 270, i32 334, i32 15, i32 79, i32 143, i32 207, i32 271, i32 335, i32 16, i32 80, i32 144, i32 208, i32 272, i32 336, i32 17, i32 81, i32 145, i32 209, i32 273, i32 337, i32 18, i32 82, i32 146, i32 210, i32 274, i32 338, i32 19, i32 83, i32 147, i32 211, i32 275, i32 339, i32 20, i32 84, i32 148, i32 212, i32 276, i32 340, i32 21, i32 85, i32 149, i32 213, i32 277, i32 341, i32 22, i32 86, i32 150, i32 214, i32 278, i32 342, i32 23, i32 87, i32 151, i32 215, i32 279, i32 343, i32 24, i32 88, i32 152, i32 216, i32 280, i32 344, i32 25, i32 89, i32 153, i32 217, i32 281, i32 345, i32 26, i32 90, i32 154, i32 218, i32 282, i32 346, i32 27, i32 91, i32 155, i32 219, i32 283, i32 347, i32 28, i32 92, i32 156, i32 220, i32 284, i32 348, i32 29, i32 93, i32 157, i32 221, i32 285, i32 349, i32 30, i32 94, i32 158, i32 222, i32 286, i32 350, i32 31, i32 95, i32 159, i32 223, i32 287, i32 351, i32 32, i32 96, i32 160, i32 224, i32 288, i32 352, i32 33, i32 97, i32 161, i32 225, i32 289, i32 353, i32 34, i32 98, i32 162, i32 226, i32 290, i32 354, i32 35, i32 99, i32 163, i32 227, i32 291, i32 355, i32 36, i32 100, i32 164, i32 228, i32 292, i32 356, i32 37, i32 101, i32 165, i32 229, i32 293, i32 357, i32 38, i32 102, i32 166, i32 230, i32 294, i32 358, i32 39, i32 103, i32 167, i32 231, i32 295, i32 359, i32 40, i32 104, i32 168, i32 232, i32 296, i32 360, i32 41, i32 105, i32 169, i32 233, i32 297, i32 361, i32 42, i32 106, i32 170, i32 234, i32 298, i32 362, i32 43, i32 107, i32 171, i32 235, i32 299, i32 363, i32 44, i32 108, i32 172, i32 236, i32 300, i32 364, i32 45, i32 109, i32 173, i32 237, i32 301, i32 365, i32 46, i32 110, i32 174, i32 238, i32 302, i32 366, i32 47, i32 111, i32 175, i32 239, i32 303, i32 367, i32 48, i32 112, i32 176, i32 240, i32 304, i32 368, i32 49, i32 113, i32 177, i32 241, i32 305, i32 369, i32 50, i32 114, i32 178, i32 242, i32 306, i32 370, i32 51, i32 115, i32 179, i32 243, i32 307, i32 371, i32 52, i32 116, i32 180, i32 244, i32 308, i32 372, i32 53, i32 117, i32 181, i32 245, i32 309, i32 373, i32 54, i32 118, i32 182, i32 246, i32 310, i32 374, i32 55, i32 119, i32 183, i32 247, i32 311, i32 375, i32 56, i32 120, i32 184, i32 248, i32 312, i32 376, i32 57, i32 121, i32 185, i32 249, i32 313, i32 377, i32 58, i32 122, i32 186, i32 250, i32 314, i32 378, i32 59, i32 123, i32 187, i32 251, i32 315, i32 379, i32 60, i32 124, i32 188, i32 252, i32 316, i32 380, i32 61, i32 125, i32 189, i32 253, i32 317, i32 381, i32 62, i32 126, i32 190, i32 254, i32 318, i32 382, i32 63, i32 127, i32 191, i32 255, i32 319, i32 383>
9535 store <384 x i32> %interleaved.vec, ptr %out.vec, align 64
9536   ret void
9537 }
9538 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
9542 ; AVX512-FAST: {{.*}}
9543 ; AVX512-SLOW: {{.*}}
9545 ; AVX512BW-ONLY-FAST: {{.*}}
9546 ; AVX512BW-ONLY-SLOW: {{.*}}
9547 ; AVX512DQ-FAST: {{.*}}
9548 ; AVX512DQ-SLOW: {{.*}}
9549 ; AVX512DQBW-FAST: {{.*}}
9550 ; AVX512DQBW-SLOW: {{.*}}
9552 ; AVX512F-ONLY-FAST: {{.*}}
9553 ; AVX512F-ONLY-SLOW: {{.*}}
9556 ; FALLBACK10: {{.*}}
9557 ; FALLBACK11: {{.*}}
9558 ; FALLBACK12: {{.*}}