1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by LoopVectorizer for interleaved stores.
18 define void @store_i32_stride3_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %out.vec) nounwind {
19 ; SSE-LABEL: store_i32_stride3_vf2:
21 ; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
22 ; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
23 ; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
24 ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
25 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
26 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[1,0]
27 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[3,3,3,3]
28 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2]
29 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
30 ; SSE-NEXT: movq %xmm3, 16(%rcx)
31 ; SSE-NEXT: movaps %xmm2, (%rcx)
34 ; AVX1-ONLY-LABEL: store_i32_stride3_vf2:
36 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
37 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
38 ; AVX1-ONLY-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
39 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
40 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm2
41 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
42 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,1,0,3,7,5,4,7]
43 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm1 = ymm2[0,2,u,1,u,5,u,u]
44 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
45 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
46 ; AVX1-ONLY-NEXT: vmovlps %xmm1, 16(%rcx)
47 ; AVX1-ONLY-NEXT: vmovaps %xmm0, (%rcx)
48 ; AVX1-ONLY-NEXT: vzeroupper
49 ; AVX1-ONLY-NEXT: retq
51 ; AVX2-LABEL: store_i32_stride3_vf2:
53 ; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
54 ; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
55 ; AVX2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
56 ; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
57 ; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
58 ; AVX2-NEXT: vmovaps {{.*#+}} ymm1 = <0,2,4,1,3,5,u,u>
59 ; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
60 ; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
61 ; AVX2-NEXT: vmovlps %xmm1, 16(%rcx)
62 ; AVX2-NEXT: vmovaps %xmm0, (%rcx)
63 ; AVX2-NEXT: vzeroupper
65 %in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64
66 %in.vec1 = load <2 x i32>, ptr %in.vecptr1, align 64
67 %in.vec2 = load <2 x i32>, ptr %in.vecptr2, align 64
68 %1 = shufflevector <2 x i32> %in.vec0, <2 x i32> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
69 %2 = shufflevector <2 x i32> %in.vec2, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
70 %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
71 %interleaved.vec = shufflevector <6 x i32> %3, <6 x i32> poison, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
72 store <6 x i32> %interleaved.vec, ptr %out.vec, align 64
76 define void @store_i32_stride3_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %out.vec) nounwind {
77 ; SSE-LABEL: store_i32_stride3_vf4:
79 ; SSE-NEXT: movaps (%rdi), %xmm0
80 ; SSE-NEXT: movaps (%rsi), %xmm1
81 ; SSE-NEXT: movaps (%rdx), %xmm2
82 ; SSE-NEXT: movaps %xmm0, %xmm3
83 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm2[0,3]
84 ; SSE-NEXT: movaps %xmm0, %xmm4
85 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
86 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,0]
87 ; SSE-NEXT: movaps %xmm0, %xmm3
88 ; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
89 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
90 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
91 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
92 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm2[2,3]
93 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
94 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
95 ; SSE-NEXT: movaps %xmm4, (%rcx)
96 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
99 ; AVX1-ONLY-LABEL: store_i32_stride3_vf4:
100 ; AVX1-ONLY: # %bb.0:
101 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm0
102 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm1
103 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
104 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm3
105 ; AVX1-ONLY-NEXT: vmovsldup {{.*#+}} ymm3 = ymm3[0,0,2,2,4,4,6,6]
106 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm2 = ymm2[0,u,u,1,5,u,u,6]
107 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5],ymm3[6],ymm2[7]
108 ; AVX1-ONLY-NEXT: vbroadcastsd (%rdx), %ymm3
109 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
110 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
111 ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
112 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
113 ; AVX1-ONLY-NEXT: vmovaps %xmm0, 32(%rcx)
114 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rcx)
115 ; AVX1-ONLY-NEXT: vzeroupper
116 ; AVX1-ONLY-NEXT: retq
118 ; AVX2-SLOW-LABEL: store_i32_stride3_vf4:
119 ; AVX2-SLOW: # %bb.0:
120 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm0
121 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %xmm1
122 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
123 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm3 = <0,4,u,1,5,u,2,6>
124 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm3, %ymm2
125 ; AVX2-SLOW-NEXT: vbroadcastsd (%rdx), %ymm3
126 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
127 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
128 ; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
129 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
130 ; AVX2-SLOW-NEXT: vmovaps %xmm0, 32(%rcx)
131 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rcx)
132 ; AVX2-SLOW-NEXT: vzeroupper
133 ; AVX2-SLOW-NEXT: retq
135 ; AVX2-FAST-LABEL: store_i32_stride3_vf4:
136 ; AVX2-FAST: # %bb.0:
137 ; AVX2-FAST-NEXT: vmovaps (%rdi), %xmm0
138 ; AVX2-FAST-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
139 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm1 = [7,3,7,3,7,3,7,3]
140 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm1
141 ; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
142 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3]
143 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = <0,4,u,1,5,u,2,6>
144 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
145 ; AVX2-FAST-NEXT: vbroadcastsd (%rdx), %ymm2
146 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
147 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rcx)
148 ; AVX2-FAST-NEXT: vmovaps %xmm1, 32(%rcx)
149 ; AVX2-FAST-NEXT: vzeroupper
150 ; AVX2-FAST-NEXT: retq
152 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride3_vf4:
153 ; AVX2-FAST-PERLANE: # %bb.0:
154 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %xmm0
155 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %xmm1
156 ; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
157 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm3 = <0,4,u,1,5,u,2,6>
158 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm3, %ymm2
159 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rdx), %ymm3
160 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
161 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
162 ; AVX2-FAST-PERLANE-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
163 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
164 ; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, 32(%rcx)
165 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rcx)
166 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
167 ; AVX2-FAST-PERLANE-NEXT: retq
169 ; AVX512-LABEL: store_i32_stride3_vf4:
171 ; AVX512-NEXT: vmovaps (%rdi), %xmm0
172 ; AVX512-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
173 ; AVX512-NEXT: vinsertf32x4 $2, (%rdx), %zmm0, %zmm0
174 ; AVX512-NEXT: vmovaps {{.*#+}} zmm1 = <0,4,8,1,5,9,2,6,10,3,7,11,u,u,u,u>
175 ; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
176 ; AVX512-NEXT: vextractf32x4 $2, %zmm0, 32(%rcx)
177 ; AVX512-NEXT: vmovaps %ymm0, (%rcx)
178 ; AVX512-NEXT: vzeroupper
180 %in.vec0 = load <4 x i32>, ptr %in.vecptr0, align 64
181 %in.vec1 = load <4 x i32>, ptr %in.vecptr1, align 64
182 %in.vec2 = load <4 x i32>, ptr %in.vecptr2, align 64
183 %1 = shufflevector <4 x i32> %in.vec0, <4 x i32> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
184 %2 = shufflevector <4 x i32> %in.vec2, <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
185 %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
186 %interleaved.vec = shufflevector <12 x i32> %3, <12 x i32> poison, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
187 store <12 x i32> %interleaved.vec, ptr %out.vec, align 64
191 define void @store_i32_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %out.vec) nounwind {
192 ; SSE-LABEL: store_i32_stride3_vf8:
194 ; SSE-NEXT: movaps (%rdi), %xmm1
195 ; SSE-NEXT: movaps 16(%rdi), %xmm0
196 ; SSE-NEXT: movaps (%rsi), %xmm5
197 ; SSE-NEXT: movaps 16(%rsi), %xmm6
198 ; SSE-NEXT: movaps (%rdx), %xmm2
199 ; SSE-NEXT: movaps 16(%rdx), %xmm3
200 ; SSE-NEXT: movaps %xmm0, %xmm7
201 ; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm6[1]
202 ; SSE-NEXT: movaps %xmm0, %xmm8
203 ; SSE-NEXT: movaps %xmm0, %xmm4
204 ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
205 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm6[3,3]
206 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm3[1,1]
207 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
208 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm3[0,3]
209 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,0]
210 ; SSE-NEXT: movaps %xmm1, %xmm7
211 ; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm5[1]
212 ; SSE-NEXT: movaps %xmm1, %xmm8
213 ; SSE-NEXT: movaps %xmm1, %xmm9
214 ; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
215 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm5[3,3]
216 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm2[1,1]
217 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm7[0,2]
218 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm2[0,3]
219 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm8[2,0]
220 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm3[2,3]
221 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
222 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm2[2,3]
223 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0,1,3]
224 ; SSE-NEXT: movaps %xmm9, (%rcx)
225 ; SSE-NEXT: movaps %xmm5, 16(%rcx)
226 ; SSE-NEXT: movaps %xmm4, 48(%rcx)
227 ; SSE-NEXT: movaps %xmm6, 64(%rcx)
228 ; SSE-NEXT: movaps %xmm1, 32(%rcx)
229 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
232 ; AVX1-ONLY-LABEL: store_i32_stride3_vf8:
233 ; AVX1-ONLY: # %bb.0:
234 ; AVX1-ONLY-NEXT: vmovapd (%rdx), %ymm0
235 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm1
236 ; AVX1-ONLY-NEXT: vmovaps 16(%rsi), %xmm2
237 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm3
238 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm4
239 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm3[1],xmm1[1]
240 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm1[1,1],xmm5[0,2]
241 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
242 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,1]
243 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
244 ; AVX1-ONLY-NEXT: vbroadcastsd (%rdx), %ymm3
245 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
246 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm4[3,3],xmm2[3,3]
247 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
248 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[0,2]
249 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
250 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
251 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
252 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
253 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
254 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
255 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
256 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
257 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
258 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rcx)
259 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 64(%rcx)
260 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rcx)
261 ; AVX1-ONLY-NEXT: vzeroupper
262 ; AVX1-ONLY-NEXT: retq
264 ; AVX2-SLOW-LABEL: store_i32_stride3_vf8:
265 ; AVX2-SLOW: # %bb.0:
266 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
267 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm1
268 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm2
269 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
270 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
271 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
272 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
273 ; AVX2-SLOW-NEXT: vbroadcastsd (%rdx), %ymm4
274 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
275 ; AVX2-SLOW-NEXT: vbroadcastsd 24(%rdi), %ymm4
276 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
277 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
278 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
279 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm2[2,1,3,3]
280 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
281 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
282 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
283 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
284 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
285 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
286 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rcx)
287 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 64(%rcx)
288 ; AVX2-SLOW-NEXT: vmovaps %ymm3, (%rcx)
289 ; AVX2-SLOW-NEXT: vzeroupper
290 ; AVX2-SLOW-NEXT: retq
292 ; AVX2-FAST-LABEL: store_i32_stride3_vf8:
293 ; AVX2-FAST: # %bb.0:
294 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
295 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm1
296 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm2
297 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [1,0,2,2,1,0,2,2]
298 ; AVX2-FAST-NEXT: # ymm3 = mem[0,1,0,1]
299 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm3, %ymm3
300 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
301 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
302 ; AVX2-FAST-NEXT: vbroadcastsd (%rdx), %ymm4
303 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
304 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm1[0,0,3,3,4,4,7,7]
305 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
306 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
307 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[1,1,2,2]
308 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7]
309 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [5,0,7,6,5,0,7,6]
310 ; AVX2-FAST-NEXT: # ymm4 = mem[0,1,0,1]
311 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm4, %ymm1
312 ; AVX2-FAST-NEXT: vbroadcastsd 24(%rdi), %ymm4
313 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7]
314 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
315 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
316 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%rcx)
317 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rcx)
318 ; AVX2-FAST-NEXT: vmovaps %ymm3, (%rcx)
319 ; AVX2-FAST-NEXT: vzeroupper
320 ; AVX2-FAST-NEXT: retq
322 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride3_vf8:
323 ; AVX2-FAST-PERLANE: # %bb.0:
324 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
325 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm1
326 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm2
327 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
328 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
329 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
330 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
331 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rdx), %ymm4
332 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
333 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rdi), %ymm4
334 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
335 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
336 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
337 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm2[2,1,3,3]
338 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
339 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
340 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
341 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
342 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
343 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
344 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rcx)
345 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 64(%rcx)
346 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, (%rcx)
347 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
348 ; AVX2-FAST-PERLANE-NEXT: retq
350 ; AVX512-LABEL: store_i32_stride3_vf8:
352 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
353 ; AVX512-NEXT: vmovdqa (%rdx), %ymm1
354 ; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
355 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [13,21,6,14,22,7,15,23]
356 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
357 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,16,1,9,17,2,10,18,3,11,19,4,12,20,5]
358 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
359 ; AVX512-NEXT: vmovdqa64 %zmm3, (%rcx)
360 ; AVX512-NEXT: vmovdqa %ymm2, 64(%rcx)
361 ; AVX512-NEXT: vzeroupper
363 %in.vec0 = load <8 x i32>, ptr %in.vecptr0, align 64
364 %in.vec1 = load <8 x i32>, ptr %in.vecptr1, align 64
365 %in.vec2 = load <8 x i32>, ptr %in.vecptr2, align 64
366 %1 = shufflevector <8 x i32> %in.vec0, <8 x i32> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
367 %2 = shufflevector <8 x i32> %in.vec2, <8 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
368 %3 = shufflevector <16 x i32> %1, <16 x i32> %2, <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
369 %interleaved.vec = shufflevector <24 x i32> %3, <24 x i32> poison, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
370 store <24 x i32> %interleaved.vec, ptr %out.vec, align 64
374 define void @store_i32_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %out.vec) nounwind {
375 ; SSE-LABEL: store_i32_stride3_vf16:
377 ; SSE-NEXT: movaps (%rdi), %xmm1
378 ; SSE-NEXT: movaps 16(%rdi), %xmm2
379 ; SSE-NEXT: movaps 32(%rdi), %xmm4
380 ; SSE-NEXT: movaps 48(%rdi), %xmm5
381 ; SSE-NEXT: movaps (%rsi), %xmm7
382 ; SSE-NEXT: movaps 16(%rsi), %xmm9
383 ; SSE-NEXT: movaps 32(%rsi), %xmm10
384 ; SSE-NEXT: movaps 48(%rsi), %xmm11
385 ; SSE-NEXT: movaps 16(%rdx), %xmm0
386 ; SSE-NEXT: movaps 32(%rdx), %xmm3
387 ; SSE-NEXT: movaps 48(%rdx), %xmm8
388 ; SSE-NEXT: movaps %xmm5, %xmm12
389 ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm11[1]
390 ; SSE-NEXT: movaps %xmm5, %xmm13
391 ; SSE-NEXT: movaps %xmm5, %xmm6
392 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
393 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm11[3,3]
394 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
395 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,1],xmm8[1,1]
396 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm12[0,2]
397 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[1,1],xmm8[0,3]
398 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm13[2,0]
399 ; SSE-NEXT: movaps %xmm4, %xmm13
400 ; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm10[1]
401 ; SSE-NEXT: movaps %xmm4, %xmm14
402 ; SSE-NEXT: movaps %xmm4, %xmm12
403 ; SSE-NEXT: unpcklps {{.*#+}} xmm12 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
404 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,3],xmm10[3,3]
405 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
406 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,1],xmm3[1,1]
407 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm13[0,2]
408 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,1],xmm3[0,3]
409 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,1],xmm14[2,0]
410 ; SSE-NEXT: movaps %xmm2, %xmm14
411 ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm9[1]
412 ; SSE-NEXT: movaps %xmm2, %xmm15
413 ; SSE-NEXT: movaps %xmm2, %xmm13
414 ; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
415 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm9[3,3]
416 ; SSE-NEXT: movaps %xmm0, %xmm8
417 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,1],xmm0[1,1]
418 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm14[0,2]
419 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,1],xmm0[0,3]
420 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm15[2,0]
421 ; SSE-NEXT: movaps %xmm1, %xmm14
422 ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm7[1]
423 ; SSE-NEXT: movaps %xmm1, %xmm15
424 ; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm7[0],xmm15[1],xmm7[1]
425 ; SSE-NEXT: movaps %xmm1, %xmm3
426 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm7[3,3]
427 ; SSE-NEXT: movaps (%rdx), %xmm0
428 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,1],xmm0[1,1]
429 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm14[0,2]
430 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[0,3]
431 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm3[2,0]
432 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
433 ; SSE-NEXT: # xmm5 = xmm5[1,2],mem[2,3]
434 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
435 ; SSE-NEXT: # xmm4 = xmm4[1,2],mem[2,3]
436 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,2],xmm8[2,3]
437 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm0[2,3]
438 ; SSE-NEXT: movaps %xmm15, (%rcx)
439 ; SSE-NEXT: movaps %xmm7, 16(%rcx)
440 ; SSE-NEXT: movaps %xmm13, 48(%rcx)
441 ; SSE-NEXT: movaps %xmm9, 64(%rcx)
442 ; SSE-NEXT: movaps %xmm12, 96(%rcx)
443 ; SSE-NEXT: movaps %xmm10, 112(%rcx)
444 ; SSE-NEXT: movaps %xmm6, 144(%rcx)
445 ; SSE-NEXT: movaps %xmm11, 160(%rcx)
446 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0,1,3]
447 ; SSE-NEXT: movaps %xmm1, 32(%rcx)
448 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0,1,3]
449 ; SSE-NEXT: movaps %xmm2, 80(%rcx)
450 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0,1,3]
451 ; SSE-NEXT: movaps %xmm4, 128(%rcx)
452 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0,1,3]
453 ; SSE-NEXT: movaps %xmm5, 176(%rcx)
456 ; AVX1-ONLY-LABEL: store_i32_stride3_vf16:
457 ; AVX1-ONLY: # %bb.0:
458 ; AVX1-ONLY-NEXT: vmovapd (%rdx), %ymm1
459 ; AVX1-ONLY-NEXT: vmovapd 32(%rdx), %ymm0
460 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm2
461 ; AVX1-ONLY-NEXT: vmovaps 16(%rsi), %xmm3
462 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm4
463 ; AVX1-ONLY-NEXT: vmovaps 48(%rsi), %xmm5
464 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm6
465 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm7
466 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm8
467 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm6[1],xmm2[1]
468 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm2[1,1],xmm9[0,2]
469 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm6[0]
470 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[2,1]
471 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm2, %ymm2
472 ; AVX1-ONLY-NEXT: vbroadcastsd (%rdx), %ymm6
473 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7]
474 ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm6
475 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm6[3,3],xmm5[3,3]
476 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm6[1],xmm5[1]
477 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm5[1,1],xmm6[0,2]
478 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm5, %ymm5
479 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm0[2,3,2,3]
480 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0,0,3,3]
481 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6],ymm6[7]
482 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm8[1],xmm4[1]
483 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm4[1,1],xmm6[0,2]
484 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm8[0]
485 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,0],xmm8[2,1]
486 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
487 ; AVX1-ONLY-NEXT: vbroadcastsd 32(%rdx), %ymm6
488 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
489 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm7[3,3],xmm3[3,3]
490 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm7[1],xmm3[1]
491 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm7[0,2]
492 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3
493 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm1[2,3,2,3]
494 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0,0,3,3]
495 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5,6],ymm6[7]
496 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm6 = mem[0,0,3,3,4,4,7,7]
497 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm7 = mem[1,0,2,2]
498 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
499 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
500 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1,2],ymm1[3],ymm6[4,5],ymm1[6],ymm6[7]
501 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm6 = mem[0,0,3,3,4,4,7,7]
502 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm7 = mem[1,0,2,2]
503 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
504 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
505 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm6[1,2],ymm0[3],ymm6[4,5],ymm0[6],ymm6[7]
506 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rcx)
507 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rcx)
508 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 64(%rcx)
509 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rcx)
510 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 160(%rcx)
511 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rcx)
512 ; AVX1-ONLY-NEXT: vzeroupper
513 ; AVX1-ONLY-NEXT: retq
515 ; AVX2-SLOW-LABEL: store_i32_stride3_vf16:
516 ; AVX2-SLOW: # %bb.0:
517 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm4
518 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm0
519 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm5
520 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm3
521 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm6
522 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm1
523 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm2 = mem[1,0,2,2]
524 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,1]
525 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm4[0,0,2,1]
526 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6],ymm2[7]
527 ; AVX2-SLOW-NEXT: vbroadcastsd (%rdx), %ymm7
528 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7]
529 ; AVX2-SLOW-NEXT: vbroadcastsd 56(%rdi), %ymm7
530 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm3[1,2,3,3,5,6,7,7]
531 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,2,2,3]
532 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
533 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm1[2,1,3,3]
534 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5,6],ymm8[7]
535 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm8 = mem[1,0,2,2]
536 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,1]
537 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[0,0,2,1]
538 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
539 ; AVX2-SLOW-NEXT: vbroadcastsd 32(%rdx), %ymm9
540 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
541 ; AVX2-SLOW-NEXT: vbroadcastsd 24(%rdi), %ymm9
542 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm5[1,2,3,3,5,6,7,7]
543 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,3]
544 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
545 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm6[2,1,3,3]
546 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7]
547 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
548 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
549 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
550 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm6[1,1,2,2]
551 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
552 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,0,3,3,4,4,7,7]
553 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
554 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
555 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
556 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
557 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rcx)
558 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 32(%rcx)
559 ; AVX2-SLOW-NEXT: vmovaps %ymm9, 64(%rcx)
560 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 96(%rcx)
561 ; AVX2-SLOW-NEXT: vmovaps %ymm7, 160(%rcx)
562 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rcx)
563 ; AVX2-SLOW-NEXT: vzeroupper
564 ; AVX2-SLOW-NEXT: retq
566 ; AVX2-FAST-LABEL: store_i32_stride3_vf16:
567 ; AVX2-FAST: # %bb.0:
568 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm1
569 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm2
570 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm3
571 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %ymm4
572 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm5
573 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm6
574 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm3[0,0,3,3,4,4,7,7]
575 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm1[1,1,2,2]
576 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm0[2],ymm7[3,4],ymm0[5],ymm7[6,7]
577 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm5[1,1,2,2]
578 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0],ymm0[1,2],ymm7[3],ymm0[4,5],ymm7[6],ymm0[7]
579 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm7 = [1,0,2,2,1,0,2,2]
580 ; AVX2-FAST-NEXT: # ymm7 = mem[0,1,0,1]
581 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm7, %ymm8
582 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
583 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3],ymm8[4],ymm1[5,6],ymm8[7]
584 ; AVX2-FAST-NEXT: vbroadcastsd (%rdx), %ymm8
585 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm8[2],ymm1[3,4],ymm8[5],ymm1[6,7]
586 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm8 = ymm4[0,0,3,3,4,4,7,7]
587 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm2[1,1,2,2]
588 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
589 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm6[1,1,2,2]
590 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1,2],ymm9[3],ymm8[4,5],ymm9[6],ymm8[7]
591 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm9 = [5,6,5,6,5,6,7,7]
592 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm9, %ymm10
593 ; AVX2-FAST-NEXT: vbroadcastsd 56(%rdi), %ymm11
594 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7]
595 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
596 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm10[0],ymm6[1],ymm10[2,3],ymm6[4],ymm10[5,6],ymm6[7]
597 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm7, %ymm4
598 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
599 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
600 ; AVX2-FAST-NEXT: vbroadcastsd 32(%rdx), %ymm4
601 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
602 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm9, %ymm3
603 ; AVX2-FAST-NEXT: vbroadcastsd 24(%rdi), %ymm4
604 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
605 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm5[2,1,3,3]
606 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
607 ; AVX2-FAST-NEXT: vmovaps %ymm3, 64(%rcx)
608 ; AVX2-FAST-NEXT: vmovaps %ymm2, 96(%rcx)
609 ; AVX2-FAST-NEXT: vmovaps %ymm6, 160(%rcx)
610 ; AVX2-FAST-NEXT: vmovaps %ymm8, 128(%rcx)
611 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%rcx)
612 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rcx)
613 ; AVX2-FAST-NEXT: vzeroupper
614 ; AVX2-FAST-NEXT: retq
616 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride3_vf16:
617 ; AVX2-FAST-PERLANE: # %bb.0:
618 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm4
619 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm0
620 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm5
621 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm3
622 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm6
623 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm1
624 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm2 = mem[1,0,2,2]
625 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,1]
626 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm4[0,0,2,1]
627 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6],ymm2[7]
628 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rdx), %ymm7
629 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7]
630 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 56(%rdi), %ymm7
631 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm3[1,2,3,3,5,6,7,7]
632 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,2,2,3]
633 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
634 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm1[2,1,3,3]
635 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5,6],ymm8[7]
636 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm8 = mem[1,0,2,2]
637 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,1]
638 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[0,0,2,1]
639 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
640 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 32(%rdx), %ymm9
641 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
642 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rdi), %ymm9
643 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm5[1,2,3,3,5,6,7,7]
644 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,3]
645 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
646 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm6[2,1,3,3]
647 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7]
648 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
649 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
650 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
651 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm6[1,1,2,2]
652 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
653 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,0,3,3,4,4,7,7]
654 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
655 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
656 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
657 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
658 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%rcx)
659 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 32(%rcx)
660 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 64(%rcx)
661 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 96(%rcx)
662 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, 160(%rcx)
663 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rcx)
664 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
665 ; AVX2-FAST-PERLANE-NEXT: retq
667 ; AVX512-LABEL: store_i32_stride3_vf16:
669 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
670 ; AVX512-NEXT: vmovdqa64 (%rsi), %zmm1
671 ; AVX512-NEXT: vmovdqa64 (%rdx), %zmm2
672 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,16,u,1,17,u,2,18,u,3,19,u,4,20,u,5>
673 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
674 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15]
675 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm4
676 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <5,u,22,6,u,23,7,u,24,8,u,25,9,u,26,10>
677 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
678 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,21,2,3,22,5,6,23,8,9,24,11,12,25,14,15]
679 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm5
680 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,11,27,u,12,28,u,13,29,u,14,30,u,15,31,u>
681 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
682 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [26,1,2,27,4,5,28,7,8,29,10,11,30,13,14,31]
683 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm0
684 ; AVX512-NEXT: vmovdqa64 %zmm0, 128(%rcx)
685 ; AVX512-NEXT: vmovdqa64 %zmm5, 64(%rcx)
686 ; AVX512-NEXT: vmovdqa64 %zmm4, (%rcx)
687 ; AVX512-NEXT: vzeroupper
689 %in.vec0 = load <16 x i32>, ptr %in.vecptr0, align 64
690 %in.vec1 = load <16 x i32>, ptr %in.vecptr1, align 64
691 %in.vec2 = load <16 x i32>, ptr %in.vecptr2, align 64
692 %1 = shufflevector <16 x i32> %in.vec0, <16 x i32> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
693 %2 = shufflevector <16 x i32> %in.vec2, <16 x i32> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
694 %3 = shufflevector <32 x i32> %1, <32 x i32> %2, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
695 %interleaved.vec = shufflevector <48 x i32> %3, <48 x i32> poison, <48 x i32> <i32 0, i32 16, i32 32, i32 1, i32 17, i32 33, i32 2, i32 18, i32 34, i32 3, i32 19, i32 35, i32 4, i32 20, i32 36, i32 5, i32 21, i32 37, i32 6, i32 22, i32 38, i32 7, i32 23, i32 39, i32 8, i32 24, i32 40, i32 9, i32 25, i32 41, i32 10, i32 26, i32 42, i32 11, i32 27, i32 43, i32 12, i32 28, i32 44, i32 13, i32 29, i32 45, i32 14, i32 30, i32 46, i32 15, i32 31, i32 47>
696 store <48 x i32> %interleaved.vec, ptr %out.vec, align 64
700 define void @store_i32_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %out.vec) nounwind {
701 ; SSE-LABEL: store_i32_stride3_vf32:
703 ; SSE-NEXT: subq $152, %rsp
704 ; SSE-NEXT: movaps (%rdi), %xmm1
705 ; SSE-NEXT: movaps 16(%rdi), %xmm2
706 ; SSE-NEXT: movaps 32(%rdi), %xmm3
707 ; SSE-NEXT: movaps 48(%rdi), %xmm4
708 ; SSE-NEXT: movaps (%rsi), %xmm10
709 ; SSE-NEXT: movaps 16(%rsi), %xmm13
710 ; SSE-NEXT: movaps 32(%rsi), %xmm12
711 ; SSE-NEXT: movaps 48(%rsi), %xmm9
712 ; SSE-NEXT: movaps (%rdx), %xmm5
713 ; SSE-NEXT: movaps 16(%rdx), %xmm6
714 ; SSE-NEXT: movaps 32(%rdx), %xmm7
715 ; SSE-NEXT: movaps 48(%rdx), %xmm8
716 ; SSE-NEXT: movaps %xmm1, %xmm0
717 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm5[0,3]
718 ; SSE-NEXT: movaps %xmm5, %xmm11
719 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
720 ; SSE-NEXT: movaps %xmm1, %xmm5
721 ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1]
722 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
723 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
724 ; SSE-NEXT: movaps %xmm1, %xmm0
725 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm10[1]
726 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm10[3,3]
727 ; SSE-NEXT: movaps %xmm1, %xmm15
728 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,1],xmm11[1,1]
729 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[0,2]
730 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
731 ; SSE-NEXT: movaps %xmm2, %xmm0
732 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm6[0,3]
733 ; SSE-NEXT: movaps %xmm2, %xmm1
734 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1]
735 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
736 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
737 ; SSE-NEXT: movaps %xmm2, %xmm0
738 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm13[1]
739 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm13[3,3]
740 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
741 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[1,1],xmm6[1,1]
742 ; SSE-NEXT: movaps %xmm6, %xmm14
743 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,2],xmm0[0,2]
744 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
745 ; SSE-NEXT: movaps %xmm3, %xmm0
746 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
747 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm7[0,3]
748 ; SSE-NEXT: movaps %xmm3, %xmm2
749 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
750 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
751 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
752 ; SSE-NEXT: movaps %xmm3, %xmm0
753 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm12[1]
754 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm12[3,3]
755 ; SSE-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill
756 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[1,1],xmm7[1,1]
757 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,2],xmm0[0,2]
758 ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
759 ; SSE-NEXT: movaps %xmm4, %xmm0
760 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
761 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm8[0,3]
762 ; SSE-NEXT: movaps %xmm4, %xmm1
763 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
764 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
765 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
766 ; SSE-NEXT: movaps %xmm4, %xmm0
767 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm9[1]
768 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,3],xmm9[3,3]
769 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
770 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,1],xmm8[1,1]
771 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm0[0,2]
772 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
773 ; SSE-NEXT: movaps 64(%rdi), %xmm9
774 ; SSE-NEXT: movaps 64(%rdx), %xmm1
775 ; SSE-NEXT: movaps %xmm9, %xmm0
776 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
777 ; SSE-NEXT: movaps %xmm1, %xmm2
778 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
779 ; SSE-NEXT: movaps 64(%rsi), %xmm12
780 ; SSE-NEXT: movaps %xmm9, %xmm1
781 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1]
782 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
783 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
784 ; SSE-NEXT: movaps %xmm9, %xmm0
785 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm12[1]
786 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,3],xmm12[3,3]
787 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[1,1],xmm2[1,1]
788 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,2],xmm0[0,2]
789 ; SSE-NEXT: movaps 80(%rdi), %xmm10
790 ; SSE-NEXT: movaps 80(%rdx), %xmm1
791 ; SSE-NEXT: movaps %xmm10, %xmm0
792 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
793 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
794 ; SSE-NEXT: movaps 80(%rsi), %xmm8
795 ; SSE-NEXT: movaps %xmm10, %xmm11
796 ; SSE-NEXT: unpcklps {{.*#+}} xmm11 = xmm11[0],xmm8[0],xmm11[1],xmm8[1]
797 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm0[2,0]
798 ; SSE-NEXT: movaps %xmm10, %xmm0
799 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm8[1]
800 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,3],xmm8[3,3]
801 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm1[1,1]
802 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm0[0,2]
803 ; SSE-NEXT: movaps 96(%rdi), %xmm4
804 ; SSE-NEXT: movaps 96(%rdx), %xmm13
805 ; SSE-NEXT: movaps %xmm4, %xmm0
806 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm13[0,3]
807 ; SSE-NEXT: movaps 96(%rsi), %xmm5
808 ; SSE-NEXT: movaps %xmm4, %xmm6
809 ; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
810 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[2,0]
811 ; SSE-NEXT: movaps %xmm4, %xmm0
812 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm5[1]
813 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,3],xmm5[3,3]
814 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm13[1,1]
815 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm0[0,2]
816 ; SSE-NEXT: movaps 112(%rdi), %xmm0
817 ; SSE-NEXT: movaps 112(%rdx), %xmm7
818 ; SSE-NEXT: movaps %xmm0, %xmm2
819 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm7[0,3]
820 ; SSE-NEXT: movaps 112(%rsi), %xmm1
821 ; SSE-NEXT: movaps %xmm0, %xmm3
822 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
823 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,0]
824 ; SSE-NEXT: movaps %xmm0, %xmm2
825 ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
826 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
827 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm7[1,1]
828 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
829 ; SSE-NEXT: movaps %xmm15, %xmm2
830 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
831 ; SSE-NEXT: # xmm2 = xmm2[1,2],mem[2,3]
832 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
833 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,2],xmm14[2,3]
834 ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
835 ; SSE-NEXT: movaps (%rsp), %xmm15 # 16-byte Reload
836 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
837 ; SSE-NEXT: # xmm15 = xmm15[1,2],mem[2,3]
838 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
839 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
840 ; SSE-NEXT: # xmm14 = xmm14[1,2],mem[2,3]
841 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
842 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
843 ; SSE-NEXT: # xmm9 = xmm9[1,2],mem[2,3]
844 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
845 ; SSE-NEXT: # xmm10 = xmm10[1,2],mem[2,3]
846 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,2],xmm13[2,3]
847 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm7[2,3]
848 ; SSE-NEXT: movaps %xmm1, 352(%rcx)
849 ; SSE-NEXT: movaps %xmm3, 336(%rcx)
850 ; SSE-NEXT: movaps %xmm5, 304(%rcx)
851 ; SSE-NEXT: movaps %xmm6, 288(%rcx)
852 ; SSE-NEXT: movaps %xmm8, 256(%rcx)
853 ; SSE-NEXT: movaps %xmm11, 240(%rcx)
854 ; SSE-NEXT: movaps %xmm12, 208(%rcx)
855 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
856 ; SSE-NEXT: movaps %xmm1, 192(%rcx)
857 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
858 ; SSE-NEXT: movaps %xmm1, 160(%rcx)
859 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
860 ; SSE-NEXT: movaps %xmm1, 144(%rcx)
861 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
862 ; SSE-NEXT: movaps %xmm1, 112(%rcx)
863 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
864 ; SSE-NEXT: movaps %xmm1, 96(%rcx)
865 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
866 ; SSE-NEXT: movaps %xmm1, 64(%rcx)
867 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
868 ; SSE-NEXT: movaps %xmm1, 48(%rcx)
869 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
870 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
871 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
872 ; SSE-NEXT: movaps %xmm1, (%rcx)
873 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
874 ; SSE-NEXT: movaps %xmm0, 368(%rcx)
875 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0,1,3]
876 ; SSE-NEXT: movaps %xmm4, 320(%rcx)
877 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[2,0,1,3]
878 ; SSE-NEXT: movaps %xmm10, 272(%rcx)
879 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0,1,3]
880 ; SSE-NEXT: movaps %xmm9, 224(%rcx)
881 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
882 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
883 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
884 ; SSE-NEXT: movaps %xmm15, %xmm0
885 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[1,3]
886 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
887 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
888 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
889 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
890 ; SSE-NEXT: movaps %xmm2, %xmm0
891 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[1,3]
892 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
893 ; SSE-NEXT: addq $152, %rsp
896 ; AVX1-ONLY-LABEL: store_i32_stride3_vf32:
897 ; AVX1-ONLY: # %bb.0:
898 ; AVX1-ONLY-NEXT: vmovapd (%rdx), %ymm4
899 ; AVX1-ONLY-NEXT: vmovapd 32(%rdx), %ymm2
900 ; AVX1-ONLY-NEXT: vmovapd 64(%rdx), %ymm3
901 ; AVX1-ONLY-NEXT: vmovapd 96(%rdx), %ymm0
902 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm1
903 ; AVX1-ONLY-NEXT: vmovaps 16(%rsi), %xmm7
904 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm10
905 ; AVX1-ONLY-NEXT: vmovaps 48(%rsi), %xmm9
906 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm5
907 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm8
908 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm11
909 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm5[1],xmm1[1]
910 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm1[1,1],xmm6[0,2]
911 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm5[0]
912 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm5[2,1]
913 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm1, %ymm1
914 ; AVX1-ONLY-NEXT: vbroadcastsd (%rdx), %ymm5
915 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7]
916 ; AVX1-ONLY-NEXT: vmovaps 80(%rsi), %xmm5
917 ; AVX1-ONLY-NEXT: vmovaps 80(%rdi), %xmm6
918 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm6[3,3],xmm5[3,3]
919 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm6[1],xmm5[1]
920 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm5[1,1],xmm6[0,2]
921 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm5, %ymm5
922 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm3[2,3,2,3]
923 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0,0,3,3]
924 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6],ymm6[7]
925 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %xmm6
926 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm12
927 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm13 = xmm12[1],xmm6[1]
928 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm6[1,1],xmm13[0,2]
929 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm12[0]
930 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm6 = xmm6[2,0],xmm12[2,1]
931 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm6, %ymm6
932 ; AVX1-ONLY-NEXT: vbroadcastsd 64(%rdx), %ymm12
933 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm12[2],ymm6[3,4],ymm12[5],ymm6[6,7]
934 ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm12
935 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm12[3,3],xmm9[3,3]
936 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm12[1],xmm9[1]
937 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm9[1,1],xmm12[0,2]
938 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm9, %ymm9
939 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm2[2,3,2,3]
940 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm12 = ymm12[0,0,3,3]
941 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2,3],ymm12[4],ymm9[5,6],ymm12[7]
942 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm11[1],xmm10[1]
943 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm10[1,1],xmm12[0,2]
944 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm10 = xmm10[0],xmm11[0]
945 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm10 = xmm10[2,0],xmm11[2,1]
946 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm10, %ymm10
947 ; AVX1-ONLY-NEXT: vbroadcastsd 32(%rdx), %ymm11
948 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7]
949 ; AVX1-ONLY-NEXT: vmovaps 112(%rsi), %xmm11
950 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm12
951 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm12[3,3],xmm11[3,3]
952 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm12[1],xmm11[1]
953 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm11[1,1],xmm12[0,2]
954 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm11, %ymm11
955 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm0[2,3,2,3]
956 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm12 = ymm12[0,0,3,3]
957 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm12[1],ymm11[2,3],ymm12[4],ymm11[5,6],ymm12[7]
958 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %xmm12
959 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm13
960 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm14 = xmm13[1],xmm12[1]
961 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm14 = xmm12[1,1],xmm14[0,2]
962 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm12 = xmm12[0],xmm13[0]
963 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm12 = xmm12[2,0],xmm13[2,1]
964 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm12, %ymm12
965 ; AVX1-ONLY-NEXT: vbroadcastsd 96(%rdx), %ymm13
966 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7]
967 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm8[3,3],xmm7[3,3]
968 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm8 = xmm8[1],xmm7[1]
969 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,1],xmm8[0,2]
970 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm7, %ymm7
971 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm4[2,3,2,3]
972 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm8 = ymm8[0,0,3,3]
973 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5,6],ymm8[7]
974 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm8 = mem[0,0,3,3,4,4,7,7]
975 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
976 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2],ymm13[3,4],ymm8[5],ymm13[6,7]
977 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
978 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm8[1,2],ymm4[3],ymm8[4,5],ymm4[6],ymm8[7]
979 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm8 = mem[0,0,3,3,4,4,7,7]
980 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
981 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2],ymm13[3,4],ymm8[5],ymm13[6,7]
982 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[1,1,2,2]
983 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm8[1,2],ymm3[3],ymm8[4,5],ymm3[6],ymm8[7]
984 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm8 = mem[0,0,3,3,4,4,7,7]
985 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
986 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2],ymm13[3,4],ymm8[5],ymm13[6,7]
987 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
988 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm8[1,2],ymm2[3],ymm8[4,5],ymm2[6],ymm8[7]
989 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm8 = mem[0,0,3,3,4,4,7,7]
990 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
991 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2],ymm13[3,4],ymm8[5],ymm13[6,7]
992 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
993 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm8[1,2],ymm0[3],ymm8[4,5],ymm0[6],ymm8[7]
994 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 320(%rcx)
995 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 128(%rcx)
996 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 224(%rcx)
997 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rcx)
998 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 64(%rcx)
999 ; AVX1-ONLY-NEXT: vmovaps %ymm12, 288(%rcx)
1000 ; AVX1-ONLY-NEXT: vmovaps %ymm11, 352(%rcx)
1001 ; AVX1-ONLY-NEXT: vmovaps %ymm10, 96(%rcx)
1002 ; AVX1-ONLY-NEXT: vmovaps %ymm9, 160(%rcx)
1003 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 192(%rcx)
1004 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 256(%rcx)
1005 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rcx)
1006 ; AVX1-ONLY-NEXT: vzeroupper
1007 ; AVX1-ONLY-NEXT: retq
1009 ; AVX2-SLOW-LABEL: store_i32_stride3_vf32:
1010 ; AVX2-SLOW: # %bb.0:
1011 ; AVX2-SLOW-NEXT: subq $40, %rsp
1012 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm8
1013 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm1
1014 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm6
1015 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm5
1016 ; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %ymm9
1017 ; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %ymm2
1018 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm7
1019 ; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %ymm10
1020 ; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %ymm3
1021 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
1022 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
1023 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm8[0,0,2,1]
1024 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2,3],ymm0[4],ymm4[5,6],ymm0[7]
1025 ; AVX2-SLOW-NEXT: vbroadcastsd (%rdx), %ymm4
1026 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
1027 ; AVX2-SLOW-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
1028 ; AVX2-SLOW-NEXT: vbroadcastsd 88(%rdi), %ymm4
1029 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm9[1,2,3,3,5,6,7,7]
1030 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,3]
1031 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1],ymm4[2],ymm11[3,4],ymm4[5],ymm11[6,7]
1032 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm10[2,1,3,3]
1033 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0],ymm11[1],ymm4[2,3],ymm11[4],ymm4[5,6],ymm11[7]
1034 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1035 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm11 = mem[1,0,2,2]
1036 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,0,1]
1037 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm12 = ymm6[0,0,2,1]
1038 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1],ymm12[2,3],ymm11[4],ymm12[5,6],ymm11[7]
1039 ; AVX2-SLOW-NEXT: vbroadcastsd 64(%rdx), %ymm12
1040 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7]
1041 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1042 ; AVX2-SLOW-NEXT: vbroadcastsd 56(%rdi), %ymm12
1043 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm13 = ymm5[1,2,3,3,5,6,7,7]
1044 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,2,2,3]
1045 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
1046 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm7[2,1,3,3]
1047 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0],ymm13[1],ymm12[2,3],ymm13[4],ymm12[5,6],ymm13[7]
1048 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1049 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm13 = mem[1,0,2,2]
1050 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,0,1]
1051 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm1[0,0,2,1]
1052 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1],ymm14[2,3],ymm13[4],ymm14[5,6],ymm13[7]
1053 ; AVX2-SLOW-NEXT: vbroadcastsd 32(%rdx), %ymm14
1054 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7]
1055 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1056 ; AVX2-SLOW-NEXT: vbroadcastsd 120(%rdi), %ymm14
1057 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm2[1,2,3,3,5,6,7,7]
1058 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,3]
1059 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2],ymm15[3,4],ymm14[5],ymm15[6,7]
1060 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm3[2,1,3,3]
1061 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3],ymm15[4],ymm14[5,6],ymm15[7]
1062 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm15 = mem[1,0,2,2]
1063 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,0,1]
1064 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm0
1065 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
1066 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm15[1],ymm4[2,3],ymm15[4],ymm4[5,6],ymm15[7]
1067 ; AVX2-SLOW-NEXT: vbroadcastsd 96(%rdx), %ymm15
1068 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm15 = ymm4[0,1],ymm15[2],ymm4[3,4],ymm15[5],ymm4[6,7]
1069 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm4
1070 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm4[1,2,3,3,5,6,7,7]
1071 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,3]
1072 ; AVX2-SLOW-NEXT: vbroadcastsd 24(%rdi), %ymm12
1073 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7]
1074 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm12
1075 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm12[2,1,3,3]
1076 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm13[1],ymm11[2,3],ymm13[4],ymm11[5,6],ymm13[7]
1077 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,0,3,3,4,4,7,7]
1078 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[1,1,2,2]
1079 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3,4],ymm4[5],ymm8[6,7]
1080 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm12[1,1,2,2]
1081 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0],ymm4[1,2],ymm8[3],ymm4[4,5],ymm8[6],ymm4[7]
1082 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm9[0,0,3,3,4,4,7,7]
1083 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2]
1084 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm8[2],ymm6[3,4],ymm8[5],ymm6[6,7]
1085 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm10[1,1,2,2]
1086 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0],ymm6[1,2],ymm8[3],ymm6[4,5],ymm8[6],ymm6[7]
1087 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
1088 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
1089 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7]
1090 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm7[1,1,2,2]
1091 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0],ymm1[1,2],ymm5[3],ymm1[4,5],ymm5[6],ymm1[7]
1092 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,0,3,3,4,4,7,7]
1093 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
1094 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
1095 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm3[1,1,2,2]
1096 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
1097 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 320(%rcx)
1098 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 128(%rcx)
1099 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 224(%rcx)
1100 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 32(%rcx)
1101 ; AVX2-SLOW-NEXT: vmovaps %ymm11, 64(%rcx)
1102 ; AVX2-SLOW-NEXT: vmovaps %ymm15, 288(%rcx)
1103 ; AVX2-SLOW-NEXT: vmovaps %ymm14, 352(%rcx)
1104 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1105 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 96(%rcx)
1106 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1107 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rcx)
1108 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1109 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 192(%rcx)
1110 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1111 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rcx)
1112 ; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
1113 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rcx)
1114 ; AVX2-SLOW-NEXT: addq $40, %rsp
1115 ; AVX2-SLOW-NEXT: vzeroupper
1116 ; AVX2-SLOW-NEXT: retq
1118 ; AVX2-FAST-LABEL: store_i32_stride3_vf32:
1119 ; AVX2-FAST: # %bb.0:
1120 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm4
1121 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm7
1122 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm11
1123 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm3
1124 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm1
1125 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %ymm10
1126 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %ymm13
1127 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm2
1128 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm12
1129 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %ymm14
1130 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,0,3,3,4,4,7,7]
1131 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm4[1,1,2,2]
1132 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
1133 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm2[1,1,2,2]
1134 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7]
1135 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1136 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm8 = [1,0,2,2,1,0,2,2]
1137 ; AVX2-FAST-NEXT: # ymm8 = mem[0,1,0,1]
1138 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm8, %ymm5
1139 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
1140 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
1141 ; AVX2-FAST-NEXT: vbroadcastsd (%rdx), %ymm5
1142 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
1143 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1144 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm9 = [5,6,5,6,5,6,7,7]
1145 ; AVX2-FAST-NEXT: vpermps %ymm13, %ymm9, %ymm5
1146 ; AVX2-FAST-NEXT: vbroadcastsd 88(%rdi), %ymm6
1147 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7]
1148 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm14[2,1,3,3]
1149 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6],ymm6[7]
1150 ; AVX2-FAST-NEXT: vpermps %ymm13, %ymm8, %ymm6
1151 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm15 = ymm11[0,0,2,1]
1152 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm15[0],ymm6[1],ymm15[2,3],ymm6[4],ymm15[5,6],ymm6[7]
1153 ; AVX2-FAST-NEXT: vbroadcastsd 64(%rdx), %ymm15
1154 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm15[2],ymm6[3,4],ymm15[5],ymm6[6,7]
1155 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,0,3,3,4,4,7,7]
1156 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[1,1,2,2]
1157 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm13[2],ymm11[3,4],ymm13[5],ymm11[6,7]
1158 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm13 = ymm14[1,1,2,2]
1159 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm13[0],ymm11[1,2],ymm13[3],ymm11[4,5],ymm13[6],ymm11[7]
1160 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm13 = ymm10[0,0,3,3,4,4,7,7]
1161 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm14 = ymm7[1,1,2,2]
1162 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2],ymm14[3,4],ymm13[5],ymm14[6,7]
1163 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm14 = ymm12[1,1,2,2]
1164 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1,2],ymm14[3],ymm13[4,5],ymm14[6],ymm13[7]
1165 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm9, %ymm14
1166 ; AVX2-FAST-NEXT: vbroadcastsd 56(%rdi), %ymm15
1167 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7]
1168 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %ymm15
1169 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[2,1,3,3]
1170 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2,3],ymm12[4],ymm14[5,6],ymm12[7]
1171 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm8, %ymm10
1172 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,0,2,1]
1173 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm10[1],ymm7[2,3],ymm10[4],ymm7[5,6],ymm10[7]
1174 ; AVX2-FAST-NEXT: vbroadcastsd 32(%rdx), %ymm10
1175 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
1176 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm15[0,0,3,3,4,4,7,7]
1177 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm14 = ymm3[1,1,2,2]
1178 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm14[0,1],ymm10[2],ymm14[3,4],ymm10[5],ymm14[6,7]
1179 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %ymm14
1180 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm14[1,1,2,2]
1181 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm10[1,2],ymm0[3],ymm10[4,5],ymm0[6],ymm10[7]
1182 ; AVX2-FAST-NEXT: vpermps %ymm15, %ymm9, %ymm10
1183 ; AVX2-FAST-NEXT: vbroadcastsd 120(%rdi), %ymm4
1184 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm10[0,1],ymm4[2],ymm10[3,4],ymm4[5],ymm10[6,7]
1185 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm14[2,1,3,3]
1186 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm10[1],ymm4[2,3],ymm10[4],ymm4[5,6],ymm10[7]
1187 ; AVX2-FAST-NEXT: vpermps %ymm15, %ymm8, %ymm8
1188 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
1189 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm8[1],ymm3[2,3],ymm8[4],ymm3[5,6],ymm8[7]
1190 ; AVX2-FAST-NEXT: vbroadcastsd 96(%rdx), %ymm8
1191 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm8[2],ymm3[3,4],ymm8[5],ymm3[6,7]
1192 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm9, %ymm1
1193 ; AVX2-FAST-NEXT: vbroadcastsd 24(%rdi), %ymm8
1194 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm8[2],ymm1[3,4],ymm8[5],ymm1[6,7]
1195 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
1196 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
1197 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%rcx)
1198 ; AVX2-FAST-NEXT: vmovaps %ymm3, 288(%rcx)
1199 ; AVX2-FAST-NEXT: vmovaps %ymm4, 352(%rcx)
1200 ; AVX2-FAST-NEXT: vmovaps %ymm0, 320(%rcx)
1201 ; AVX2-FAST-NEXT: vmovaps %ymm7, 96(%rcx)
1202 ; AVX2-FAST-NEXT: vmovaps %ymm12, 160(%rcx)
1203 ; AVX2-FAST-NEXT: vmovaps %ymm13, 128(%rcx)
1204 ; AVX2-FAST-NEXT: vmovaps %ymm11, 224(%rcx)
1205 ; AVX2-FAST-NEXT: vmovaps %ymm6, 192(%rcx)
1206 ; AVX2-FAST-NEXT: vmovaps %ymm5, 256(%rcx)
1207 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1208 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rcx)
1209 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1210 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rcx)
1211 ; AVX2-FAST-NEXT: vzeroupper
1212 ; AVX2-FAST-NEXT: retq
1214 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride3_vf32:
1215 ; AVX2-FAST-PERLANE: # %bb.0:
1216 ; AVX2-FAST-PERLANE-NEXT: subq $40, %rsp
1217 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm8
1218 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm1
1219 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm6
1220 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm5
1221 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %ymm9
1222 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %ymm2
1223 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm7
1224 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %ymm10
1225 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %ymm3
1226 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
1227 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
1228 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm8[0,0,2,1]
1229 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2,3],ymm0[4],ymm4[5,6],ymm0[7]
1230 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rdx), %ymm4
1231 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
1232 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
1233 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 88(%rdi), %ymm4
1234 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm9[1,2,3,3,5,6,7,7]
1235 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,3]
1236 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1],ymm4[2],ymm11[3,4],ymm4[5],ymm11[6,7]
1237 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm10[2,1,3,3]
1238 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0],ymm11[1],ymm4[2,3],ymm11[4],ymm4[5,6],ymm11[7]
1239 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1240 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm11 = mem[1,0,2,2]
1241 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,0,1]
1242 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm12 = ymm6[0,0,2,1]
1243 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1],ymm12[2,3],ymm11[4],ymm12[5,6],ymm11[7]
1244 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 64(%rdx), %ymm12
1245 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7]
1246 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1247 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 56(%rdi), %ymm12
1248 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm13 = ymm5[1,2,3,3,5,6,7,7]
1249 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,2,2,3]
1250 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
1251 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm7[2,1,3,3]
1252 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0],ymm13[1],ymm12[2,3],ymm13[4],ymm12[5,6],ymm13[7]
1253 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1254 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm13 = mem[1,0,2,2]
1255 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,0,1]
1256 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm1[0,0,2,1]
1257 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1],ymm14[2,3],ymm13[4],ymm14[5,6],ymm13[7]
1258 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 32(%rdx), %ymm14
1259 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7]
1260 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1261 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 120(%rdi), %ymm14
1262 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm2[1,2,3,3,5,6,7,7]
1263 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,3]
1264 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2],ymm15[3,4],ymm14[5],ymm15[6,7]
1265 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm3[2,1,3,3]
1266 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3],ymm15[4],ymm14[5,6],ymm15[7]
1267 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm15 = mem[1,0,2,2]
1268 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,0,1]
1269 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm0
1270 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
1271 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm15[1],ymm4[2,3],ymm15[4],ymm4[5,6],ymm15[7]
1272 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 96(%rdx), %ymm15
1273 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm15 = ymm4[0,1],ymm15[2],ymm4[3,4],ymm15[5],ymm4[6,7]
1274 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm4
1275 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm4[1,2,3,3,5,6,7,7]
1276 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,3]
1277 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rdi), %ymm12
1278 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7]
1279 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm12
1280 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm12[2,1,3,3]
1281 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm13[1],ymm11[2,3],ymm13[4],ymm11[5,6],ymm13[7]
1282 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,0,3,3,4,4,7,7]
1283 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[1,1,2,2]
1284 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3,4],ymm4[5],ymm8[6,7]
1285 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm12[1,1,2,2]
1286 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0],ymm4[1,2],ymm8[3],ymm4[4,5],ymm8[6],ymm4[7]
1287 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm9[0,0,3,3,4,4,7,7]
1288 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2]
1289 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm8[2],ymm6[3,4],ymm8[5],ymm6[6,7]
1290 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm10[1,1,2,2]
1291 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0],ymm6[1,2],ymm8[3],ymm6[4,5],ymm8[6],ymm6[7]
1292 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
1293 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
1294 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7]
1295 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm7[1,1,2,2]
1296 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0],ymm1[1,2],ymm5[3],ymm1[4,5],ymm5[6],ymm1[7]
1297 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,0,3,3,4,4,7,7]
1298 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
1299 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
1300 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm3[1,1,2,2]
1301 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
1302 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 320(%rcx)
1303 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 128(%rcx)
1304 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 224(%rcx)
1305 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 32(%rcx)
1306 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, 64(%rcx)
1307 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm15, 288(%rcx)
1308 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm14, 352(%rcx)
1309 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1310 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 96(%rcx)
1311 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1312 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rcx)
1313 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1314 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 192(%rcx)
1315 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1316 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rcx)
1317 ; AVX2-FAST-PERLANE-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
1318 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rcx)
1319 ; AVX2-FAST-PERLANE-NEXT: addq $40, %rsp
1320 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
1321 ; AVX2-FAST-PERLANE-NEXT: retq
1323 ; AVX512-LABEL: store_i32_stride3_vf32:
1325 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
1326 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
1327 ; AVX512-NEXT: vmovdqa64 (%rsi), %zmm2
1328 ; AVX512-NEXT: vmovdqa64 64(%rsi), %zmm3
1329 ; AVX512-NEXT: vmovdqa64 (%rdx), %zmm4
1330 ; AVX512-NEXT: vmovdqa64 64(%rdx), %zmm5
1331 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = <0,16,u,1,17,u,2,18,u,3,19,u,4,20,u,5>
1332 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm7
1333 ; AVX512-NEXT: vpermt2d %zmm2, %zmm6, %zmm7
1334 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15]
1335 ; AVX512-NEXT: vpermt2d %zmm4, %zmm8, %zmm7
1336 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = <u,11,27,u,12,28,u,13,29,u,14,30,u,15,31,u>
1337 ; AVX512-NEXT: vmovdqa64 %zmm1, %zmm10
1338 ; AVX512-NEXT: vpermt2d %zmm3, %zmm9, %zmm10
1339 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [26,1,2,27,4,5,28,7,8,29,10,11,30,13,14,31]
1340 ; AVX512-NEXT: vpermt2d %zmm5, %zmm11, %zmm10
1341 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = <5,u,22,6,u,23,7,u,24,8,u,25,9,u,26,10>
1342 ; AVX512-NEXT: vmovdqa64 %zmm3, %zmm13
1343 ; AVX512-NEXT: vpermt2d %zmm1, %zmm12, %zmm13
1344 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,21,2,3,22,5,6,23,8,9,24,11,12,25,14,15]
1345 ; AVX512-NEXT: vpermt2d %zmm5, %zmm14, %zmm13
1346 ; AVX512-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
1347 ; AVX512-NEXT: vpermt2d %zmm5, %zmm8, %zmm1
1348 ; AVX512-NEXT: vpermi2d %zmm2, %zmm0, %zmm9
1349 ; AVX512-NEXT: vpermt2d %zmm4, %zmm11, %zmm9
1350 ; AVX512-NEXT: vpermt2d %zmm0, %zmm12, %zmm2
1351 ; AVX512-NEXT: vpermt2d %zmm4, %zmm14, %zmm2
1352 ; AVX512-NEXT: vmovdqa64 %zmm2, 64(%rcx)
1353 ; AVX512-NEXT: vmovdqa64 %zmm9, 128(%rcx)
1354 ; AVX512-NEXT: vmovdqa64 %zmm1, 192(%rcx)
1355 ; AVX512-NEXT: vmovdqa64 %zmm13, 256(%rcx)
1356 ; AVX512-NEXT: vmovdqa64 %zmm10, 320(%rcx)
1357 ; AVX512-NEXT: vmovdqa64 %zmm7, (%rcx)
1358 ; AVX512-NEXT: vzeroupper
1360 %in.vec0 = load <32 x i32>, ptr %in.vecptr0, align 64
1361 %in.vec1 = load <32 x i32>, ptr %in.vecptr1, align 64
1362 %in.vec2 = load <32 x i32>, ptr %in.vecptr2, align 64
1363 %1 = shufflevector <32 x i32> %in.vec0, <32 x i32> %in.vec1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1364 %2 = shufflevector <32 x i32> %in.vec2, <32 x i32> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1365 %3 = shufflevector <64 x i32> %1, <64 x i32> %2, <96 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
1366 %interleaved.vec = shufflevector <96 x i32> %3, <96 x i32> poison, <96 x i32> <i32 0, i32 32, i32 64, i32 1, i32 33, i32 65, i32 2, i32 34, i32 66, i32 3, i32 35, i32 67, i32 4, i32 36, i32 68, i32 5, i32 37, i32 69, i32 6, i32 38, i32 70, i32 7, i32 39, i32 71, i32 8, i32 40, i32 72, i32 9, i32 41, i32 73, i32 10, i32 42, i32 74, i32 11, i32 43, i32 75, i32 12, i32 44, i32 76, i32 13, i32 45, i32 77, i32 14, i32 46, i32 78, i32 15, i32 47, i32 79, i32 16, i32 48, i32 80, i32 17, i32 49, i32 81, i32 18, i32 50, i32 82, i32 19, i32 51, i32 83, i32 20, i32 52, i32 84, i32 21, i32 53, i32 85, i32 22, i32 54, i32 86, i32 23, i32 55, i32 87, i32 24, i32 56, i32 88, i32 25, i32 57, i32 89, i32 26, i32 58, i32 90, i32 27, i32 59, i32 91, i32 28, i32 60, i32 92, i32 29, i32 61, i32 93, i32 30, i32 62, i32 94, i32 31, i32 63, i32 95>
1367 store <96 x i32> %interleaved.vec, ptr %out.vec, align 64
1371 define void @store_i32_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %out.vec) nounwind {
1372 ; SSE-LABEL: store_i32_stride3_vf64:
1374 ; SSE-NEXT: subq $664, %rsp # imm = 0x298
1375 ; SSE-NEXT: movaps (%rdi), %xmm2
1376 ; SSE-NEXT: movaps 16(%rdi), %xmm4
1377 ; SSE-NEXT: movaps 32(%rdi), %xmm5
1378 ; SSE-NEXT: movaps 48(%rdi), %xmm6
1379 ; SSE-NEXT: movaps (%rsi), %xmm0
1380 ; SSE-NEXT: movaps 16(%rsi), %xmm11
1381 ; SSE-NEXT: movaps 32(%rsi), %xmm14
1382 ; SSE-NEXT: movaps 48(%rsi), %xmm3
1383 ; SSE-NEXT: movaps (%rdx), %xmm7
1384 ; SSE-NEXT: movaps 16(%rdx), %xmm8
1385 ; SSE-NEXT: movaps 32(%rdx), %xmm9
1386 ; SSE-NEXT: movaps 48(%rdx), %xmm10
1387 ; SSE-NEXT: movaps %xmm2, %xmm1
1388 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm7[0,3]
1389 ; SSE-NEXT: movaps %xmm7, %xmm12
1390 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1391 ; SSE-NEXT: movaps %xmm2, %xmm7
1392 ; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
1393 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
1394 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1395 ; SSE-NEXT: movaps %xmm2, %xmm1
1396 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
1397 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm0[3,3]
1398 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1399 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm12[1,1]
1400 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1401 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1402 ; SSE-NEXT: movaps %xmm4, %xmm0
1403 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1404 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm8[0,3]
1405 ; SSE-NEXT: movaps %xmm4, %xmm1
1406 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
1407 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
1408 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1409 ; SSE-NEXT: movaps %xmm4, %xmm0
1410 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm11[1]
1411 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,3],xmm11[3,3]
1412 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1413 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,1],xmm8[1,1]
1414 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm0[0,2]
1415 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1416 ; SSE-NEXT: movaps %xmm5, %xmm0
1417 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1418 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm9[0,3]
1419 ; SSE-NEXT: movaps %xmm5, %xmm1
1420 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1]
1421 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
1422 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1423 ; SSE-NEXT: movaps %xmm5, %xmm0
1424 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm14[1]
1425 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm14[3,3]
1426 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1427 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,1],xmm9[1,1]
1428 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,2],xmm0[0,2]
1429 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1430 ; SSE-NEXT: movaps %xmm6, %xmm0
1431 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1432 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm10[0,3]
1433 ; SSE-NEXT: movaps %xmm6, %xmm1
1434 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
1435 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
1436 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1437 ; SSE-NEXT: movaps %xmm6, %xmm0
1438 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
1439 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm3[3,3]
1440 ; SSE-NEXT: movaps %xmm6, (%rsp) # 16-byte Spill
1441 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm10[1,1]
1442 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm0[0,2]
1443 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1444 ; SSE-NEXT: movaps 64(%rdi), %xmm2
1445 ; SSE-NEXT: movaps 64(%rdx), %xmm1
1446 ; SSE-NEXT: movaps %xmm2, %xmm0
1447 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1448 ; SSE-NEXT: movaps %xmm1, %xmm4
1449 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1450 ; SSE-NEXT: movaps 64(%rsi), %xmm1
1451 ; SSE-NEXT: movaps %xmm2, %xmm3
1452 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1453 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
1454 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1455 ; SSE-NEXT: movaps %xmm2, %xmm0
1456 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1457 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
1458 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1459 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
1460 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
1461 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1462 ; SSE-NEXT: movaps 80(%rdi), %xmm2
1463 ; SSE-NEXT: movaps 80(%rdx), %xmm1
1464 ; SSE-NEXT: movaps %xmm2, %xmm0
1465 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1466 ; SSE-NEXT: movaps %xmm1, %xmm4
1467 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1468 ; SSE-NEXT: movaps 80(%rsi), %xmm1
1469 ; SSE-NEXT: movaps %xmm2, %xmm3
1470 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1471 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
1472 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1473 ; SSE-NEXT: movaps %xmm2, %xmm0
1474 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1475 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
1476 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1477 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
1478 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
1479 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1480 ; SSE-NEXT: movaps 96(%rdi), %xmm2
1481 ; SSE-NEXT: movaps 96(%rdx), %xmm1
1482 ; SSE-NEXT: movaps %xmm2, %xmm0
1483 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1484 ; SSE-NEXT: movaps %xmm1, %xmm4
1485 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1486 ; SSE-NEXT: movaps 96(%rsi), %xmm1
1487 ; SSE-NEXT: movaps %xmm2, %xmm3
1488 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1489 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
1490 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1491 ; SSE-NEXT: movaps %xmm2, %xmm0
1492 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1493 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
1494 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1495 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
1496 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
1497 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1498 ; SSE-NEXT: movaps 112(%rdi), %xmm2
1499 ; SSE-NEXT: movaps 112(%rdx), %xmm1
1500 ; SSE-NEXT: movaps %xmm2, %xmm0
1501 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1502 ; SSE-NEXT: movaps %xmm1, %xmm4
1503 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1504 ; SSE-NEXT: movaps 112(%rsi), %xmm1
1505 ; SSE-NEXT: movaps %xmm2, %xmm3
1506 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1507 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
1508 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1509 ; SSE-NEXT: movaps %xmm2, %xmm0
1510 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1511 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
1512 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1513 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
1514 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
1515 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1516 ; SSE-NEXT: movaps 128(%rdi), %xmm2
1517 ; SSE-NEXT: movaps 128(%rdx), %xmm1
1518 ; SSE-NEXT: movaps %xmm2, %xmm0
1519 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1520 ; SSE-NEXT: movaps %xmm1, %xmm4
1521 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1522 ; SSE-NEXT: movaps 128(%rsi), %xmm1
1523 ; SSE-NEXT: movaps %xmm2, %xmm3
1524 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1525 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
1526 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1527 ; SSE-NEXT: movaps %xmm2, %xmm0
1528 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1529 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
1530 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1531 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
1532 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
1533 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1534 ; SSE-NEXT: movaps 144(%rdi), %xmm2
1535 ; SSE-NEXT: movaps 144(%rdx), %xmm1
1536 ; SSE-NEXT: movaps %xmm2, %xmm0
1537 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1538 ; SSE-NEXT: movaps %xmm1, %xmm4
1539 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1540 ; SSE-NEXT: movaps 144(%rsi), %xmm1
1541 ; SSE-NEXT: movaps %xmm2, %xmm3
1542 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1543 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
1544 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1545 ; SSE-NEXT: movaps %xmm2, %xmm0
1546 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1547 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
1548 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1549 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
1550 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
1551 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1552 ; SSE-NEXT: movaps 160(%rdi), %xmm14
1553 ; SSE-NEXT: movaps 160(%rdx), %xmm1
1554 ; SSE-NEXT: movaps %xmm14, %xmm0
1555 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1556 ; SSE-NEXT: movaps %xmm1, %xmm3
1557 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1558 ; SSE-NEXT: movaps 160(%rsi), %xmm1
1559 ; SSE-NEXT: movaps %xmm14, %xmm2
1560 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1561 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
1562 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1563 ; SSE-NEXT: movaps %xmm14, %xmm0
1564 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1565 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[3,3],xmm1[3,3]
1566 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[1,1]
1567 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
1568 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1569 ; SSE-NEXT: movaps 176(%rdi), %xmm12
1570 ; SSE-NEXT: movaps 176(%rdx), %xmm1
1571 ; SSE-NEXT: movaps %xmm12, %xmm0
1572 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1573 ; SSE-NEXT: movaps %xmm1, %xmm3
1574 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1575 ; SSE-NEXT: movaps 176(%rsi), %xmm1
1576 ; SSE-NEXT: movaps %xmm12, %xmm2
1577 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1578 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
1579 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1580 ; SSE-NEXT: movaps %xmm12, %xmm0
1581 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1582 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[3,3],xmm1[3,3]
1583 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[1,1]
1584 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
1585 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1586 ; SSE-NEXT: movaps 192(%rdi), %xmm13
1587 ; SSE-NEXT: movaps 192(%rdx), %xmm1
1588 ; SSE-NEXT: movaps %xmm13, %xmm0
1589 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1590 ; SSE-NEXT: movaps %xmm1, %xmm2
1591 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1592 ; SSE-NEXT: movaps 192(%rsi), %xmm11
1593 ; SSE-NEXT: movaps %xmm13, %xmm1
1594 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
1595 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
1596 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1597 ; SSE-NEXT: movaps %xmm13, %xmm0
1598 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm11[1]
1599 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[3,3],xmm11[3,3]
1600 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,1],xmm2[1,1]
1601 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm0[0,2]
1602 ; SSE-NEXT: movaps 208(%rdi), %xmm6
1603 ; SSE-NEXT: movaps 208(%rdx), %xmm1
1604 ; SSE-NEXT: movaps %xmm6, %xmm0
1605 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
1606 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1607 ; SSE-NEXT: movaps 208(%rsi), %xmm8
1608 ; SSE-NEXT: movaps %xmm6, %xmm10
1609 ; SSE-NEXT: unpcklps {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
1610 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,1],xmm0[2,0]
1611 ; SSE-NEXT: movaps %xmm6, %xmm0
1612 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm8[1]
1613 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm8[3,3]
1614 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm1[1,1]
1615 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm0[0,2]
1616 ; SSE-NEXT: movaps 224(%rdi), %xmm5
1617 ; SSE-NEXT: movaps 224(%rdx), %xmm15
1618 ; SSE-NEXT: movaps %xmm5, %xmm0
1619 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm15[0,3]
1620 ; SSE-NEXT: movaps 224(%rsi), %xmm4
1621 ; SSE-NEXT: movaps %xmm5, %xmm7
1622 ; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
1623 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm0[2,0]
1624 ; SSE-NEXT: movaps %xmm5, %xmm0
1625 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1]
1626 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm4[3,3]
1627 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm15[1,1]
1628 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[0,2]
1629 ; SSE-NEXT: movaps 240(%rdi), %xmm2
1630 ; SSE-NEXT: movaps 240(%rdx), %xmm9
1631 ; SSE-NEXT: movaps %xmm2, %xmm1
1632 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm9[0,3]
1633 ; SSE-NEXT: movaps 240(%rsi), %xmm0
1634 ; SSE-NEXT: movaps %xmm2, %xmm3
1635 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
1636 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,0]
1637 ; SSE-NEXT: movaps %xmm2, %xmm1
1638 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
1639 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm0[3,3]
1640 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm9[1,1]
1641 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1642 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1643 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1644 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1645 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1646 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1647 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1648 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1649 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1650 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1651 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1652 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1653 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1654 ; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
1655 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1656 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1657 ; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
1658 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1659 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1660 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1661 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1662 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1663 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1664 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1665 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1666 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1667 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1668 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1669 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1670 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1671 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1672 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1673 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1674 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1675 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1676 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1677 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1678 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1679 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1680 ; SSE-NEXT: # xmm1 = xmm1[1,2],mem[2,3]
1681 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1682 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
1683 ; SSE-NEXT: # xmm14 = xmm14[1,2],mem[2,3]
1684 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
1685 ; SSE-NEXT: # xmm12 = xmm12[1,2],mem[2,3]
1686 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
1687 ; SSE-NEXT: # xmm13 = xmm13[1,2],mem[2,3]
1688 ; SSE-NEXT: shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1689 ; SSE-NEXT: # xmm6 = xmm6[1,2],mem[2,3]
1690 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,2],xmm15[2,3]
1691 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,2],xmm9[2,3]
1692 ; SSE-NEXT: movaps %xmm0, 736(%rcx)
1693 ; SSE-NEXT: movaps %xmm3, 720(%rcx)
1694 ; SSE-NEXT: movaps %xmm4, 688(%rcx)
1695 ; SSE-NEXT: movaps %xmm7, 672(%rcx)
1696 ; SSE-NEXT: movaps %xmm8, 640(%rcx)
1697 ; SSE-NEXT: movaps %xmm10, 624(%rcx)
1698 ; SSE-NEXT: movaps %xmm11, 592(%rcx)
1699 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1700 ; SSE-NEXT: movaps %xmm0, 576(%rcx)
1701 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1702 ; SSE-NEXT: movaps %xmm0, 544(%rcx)
1703 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1704 ; SSE-NEXT: movaps %xmm0, 528(%rcx)
1705 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1706 ; SSE-NEXT: movaps %xmm0, 496(%rcx)
1707 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1708 ; SSE-NEXT: movaps %xmm0, 480(%rcx)
1709 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1710 ; SSE-NEXT: movaps %xmm0, 448(%rcx)
1711 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1712 ; SSE-NEXT: movaps %xmm0, 432(%rcx)
1713 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1714 ; SSE-NEXT: movaps %xmm0, 400(%rcx)
1715 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1716 ; SSE-NEXT: movaps %xmm0, 384(%rcx)
1717 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1718 ; SSE-NEXT: movaps %xmm0, 352(%rcx)
1719 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1720 ; SSE-NEXT: movaps %xmm0, 336(%rcx)
1721 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1722 ; SSE-NEXT: movaps %xmm0, 304(%rcx)
1723 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1724 ; SSE-NEXT: movaps %xmm0, 288(%rcx)
1725 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1726 ; SSE-NEXT: movaps %xmm0, 256(%rcx)
1727 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1728 ; SSE-NEXT: movaps %xmm0, 240(%rcx)
1729 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1730 ; SSE-NEXT: movaps %xmm0, 208(%rcx)
1731 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1732 ; SSE-NEXT: movaps %xmm0, 192(%rcx)
1733 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1734 ; SSE-NEXT: movaps %xmm0, 160(%rcx)
1735 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1736 ; SSE-NEXT: movaps %xmm0, 144(%rcx)
1737 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1738 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
1739 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1740 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
1741 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1742 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
1743 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1744 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
1745 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1746 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
1747 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1748 ; SSE-NEXT: movaps %xmm0, (%rcx)
1749 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0,1,3]
1750 ; SSE-NEXT: movaps %xmm2, 752(%rcx)
1751 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0,1,3]
1752 ; SSE-NEXT: movaps %xmm5, 704(%rcx)
1753 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0,1,3]
1754 ; SSE-NEXT: movaps %xmm6, 656(%rcx)
1755 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,0,1,3]
1756 ; SSE-NEXT: movaps %xmm13, 608(%rcx)
1757 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0,1,3]
1758 ; SSE-NEXT: movaps %xmm12, 560(%rcx)
1759 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0,1,3]
1760 ; SSE-NEXT: movaps %xmm14, 512(%rcx)
1761 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1762 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1763 ; SSE-NEXT: movaps %xmm0, 464(%rcx)
1764 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1765 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1766 ; SSE-NEXT: movaps %xmm0, 416(%rcx)
1767 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1768 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1769 ; SSE-NEXT: movaps %xmm0, 368(%rcx)
1770 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1771 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1772 ; SSE-NEXT: movaps %xmm0, 320(%rcx)
1773 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1774 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1775 ; SSE-NEXT: movaps %xmm0, 272(%rcx)
1776 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1777 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1778 ; SSE-NEXT: movaps %xmm0, 224(%rcx)
1779 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
1780 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1781 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
1782 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1783 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1784 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
1785 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1786 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1787 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
1788 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1789 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
1790 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
1791 ; SSE-NEXT: addq $664, %rsp # imm = 0x298
1794 ; AVX1-ONLY-LABEL: store_i32_stride3_vf64:
1795 ; AVX1-ONLY: # %bb.0:
1796 ; AVX1-ONLY-NEXT: subq $200, %rsp
1797 ; AVX1-ONLY-NEXT: vmovapd (%rdx), %ymm8
1798 ; AVX1-ONLY-NEXT: vmovapd 32(%rdx), %ymm9
1799 ; AVX1-ONLY-NEXT: vmovapd 64(%rdx), %ymm11
1800 ; AVX1-ONLY-NEXT: vmovapd 96(%rdx), %ymm12
1801 ; AVX1-ONLY-NEXT: vmovaps (%rsi), %xmm0
1802 ; AVX1-ONLY-NEXT: vmovaps 16(%rsi), %xmm1
1803 ; AVX1-ONLY-NEXT: vmovaps 32(%rsi), %xmm2
1804 ; AVX1-ONLY-NEXT: vmovaps 48(%rsi), %xmm3
1805 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm4
1806 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm5
1807 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm6
1808 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm4[1],xmm0[1]
1809 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm0[1,1],xmm7[0,2]
1810 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
1811 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[2,1]
1812 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm0
1813 ; AVX1-ONLY-NEXT: vbroadcastsd (%rdx), %ymm4
1814 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
1815 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1816 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm5[3,3],xmm1[3,3]
1817 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm5[1],xmm1[1]
1818 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[0,2]
1819 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
1820 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm8[2,3,2,3]
1821 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
1822 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
1823 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1824 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm6[1],xmm2[1]
1825 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm2[1,1],xmm0[0,2]
1826 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm6[0]
1827 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm6[2,1]
1828 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
1829 ; AVX1-ONLY-NEXT: vbroadcastsd 32(%rdx), %ymm1
1830 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
1831 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1832 ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm0
1833 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3],xmm3[3,3]
1834 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
1835 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm0[0,2]
1836 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
1837 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm9[2,3,2,3]
1838 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
1839 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
1840 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1841 ; AVX1-ONLY-NEXT: vmovaps 64(%rsi), %xmm0
1842 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm1
1843 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
1844 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
1845 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1846 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
1847 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1848 ; AVX1-ONLY-NEXT: vbroadcastsd 64(%rdx), %ymm1
1849 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
1850 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1851 ; AVX1-ONLY-NEXT: vmovaps 80(%rsi), %xmm0
1852 ; AVX1-ONLY-NEXT: vmovaps 80(%rdi), %xmm1
1853 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
1854 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
1855 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
1856 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1857 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm11[2,3,2,3]
1858 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
1859 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
1860 ; AVX1-ONLY-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
1861 ; AVX1-ONLY-NEXT: vmovaps 96(%rsi), %xmm0
1862 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm1
1863 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
1864 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
1865 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1866 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
1867 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1868 ; AVX1-ONLY-NEXT: vbroadcastsd 96(%rdx), %ymm1
1869 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
1870 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1871 ; AVX1-ONLY-NEXT: vmovaps 112(%rsi), %xmm0
1872 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm1
1873 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
1874 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
1875 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
1876 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1877 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm12[2,3,2,3]
1878 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
1879 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
1880 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1881 ; AVX1-ONLY-NEXT: vmovaps 128(%rsi), %xmm0
1882 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm1
1883 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
1884 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
1885 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1886 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
1887 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1888 ; AVX1-ONLY-NEXT: vbroadcastsd 128(%rdx), %ymm1
1889 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
1890 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1891 ; AVX1-ONLY-NEXT: vmovaps 144(%rsi), %xmm0
1892 ; AVX1-ONLY-NEXT: vmovaps 144(%rdi), %xmm1
1893 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
1894 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
1895 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
1896 ; AVX1-ONLY-NEXT: vmovapd 128(%rdx), %ymm6
1897 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1898 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm6[2,3,2,3]
1899 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
1900 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
1901 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1902 ; AVX1-ONLY-NEXT: vmovaps 160(%rsi), %xmm0
1903 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm1
1904 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
1905 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
1906 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1907 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
1908 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1909 ; AVX1-ONLY-NEXT: vbroadcastsd 160(%rdx), %ymm1
1910 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
1911 ; AVX1-ONLY-NEXT: vmovaps 176(%rsi), %xmm0
1912 ; AVX1-ONLY-NEXT: vmovaps 176(%rdi), %xmm1
1913 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
1914 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
1915 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
1916 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1917 ; AVX1-ONLY-NEXT: vmovapd 160(%rdx), %ymm4
1918 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm4[2,3,2,3]
1919 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
1920 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
1921 ; AVX1-ONLY-NEXT: vmovaps 192(%rsi), %xmm0
1922 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm1
1923 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm1[1],xmm0[1]
1924 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm0[1,1],xmm3[0,2]
1925 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1926 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
1927 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
1928 ; AVX1-ONLY-NEXT: vbroadcastsd 192(%rdx), %ymm1
1929 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
1930 ; AVX1-ONLY-NEXT: vmovaps 208(%rsi), %xmm0
1931 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm1
1932 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm1[3,3],xmm0[3,3]
1933 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
1934 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
1935 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
1936 ; AVX1-ONLY-NEXT: vmovapd 192(%rdx), %ymm2
1937 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm2[2,3,2,3]
1938 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
1939 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6],ymm3[7]
1940 ; AVX1-ONLY-NEXT: vmovaps 224(%rsi), %xmm0
1941 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm3
1942 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm3[1],xmm0[1]
1943 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm0[1,1],xmm5[0,2]
1944 ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
1945 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[2,1]
1946 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
1947 ; AVX1-ONLY-NEXT: vbroadcastsd 224(%rdx), %ymm3
1948 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
1949 ; AVX1-ONLY-NEXT: vmovaps 240(%rsi), %xmm0
1950 ; AVX1-ONLY-NEXT: vmovaps 240(%rdi), %xmm3
1951 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm3[3,3],xmm0[3,3]
1952 ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
1953 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm3[0,2]
1954 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1955 ; AVX1-ONLY-NEXT: vmovapd 224(%rdx), %ymm0
1956 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
1957 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
1958 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
1959 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
1960 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
1961 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
1962 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm8 = ymm8[1,1,2,2]
1963 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm1[1,2],ymm8[3],ymm1[4,5],ymm8[6],ymm1[7]
1964 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
1965 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
1966 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
1967 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm9 = ymm9[1,1,2,2]
1968 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm1[1,2],ymm9[3],ymm1[4,5],ymm9[6],ymm1[7]
1969 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
1970 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
1971 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
1972 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm11 = ymm11[1,1,2,2]
1973 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm11[0],ymm1[1,2],ymm11[3],ymm1[4,5],ymm11[6],ymm1[7]
1974 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm11 = mem[0,0,3,3,4,4,7,7]
1975 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
1976 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm13[0,1],ymm11[2],ymm13[3,4],ymm11[5],ymm13[6,7]
1977 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm12 = ymm12[1,1,2,2]
1978 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7]
1979 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7]
1980 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
1981 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
1982 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[1,1,2,2]
1983 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm12[1,2],ymm6[3],ymm12[4,5],ymm6[6],ymm12[7]
1984 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7]
1985 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
1986 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
1987 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
1988 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7]
1989 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7]
1990 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
1991 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
1992 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
1993 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm12[1,2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7]
1994 ; AVX1-ONLY-NEXT: vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7]
1995 ; AVX1-ONLY-NEXT: vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
1996 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
1997 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
1998 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm12[1,2],ymm0[3],ymm12[4,5],ymm0[6],ymm12[7]
1999 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 704(%rcx)
2000 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 608(%rcx)
2001 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 512(%rcx)
2002 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 416(%rcx)
2003 ; AVX1-ONLY-NEXT: vmovaps %ymm11, 320(%rcx)
2004 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%rcx)
2005 ; AVX1-ONLY-NEXT: vmovaps %ymm9, 128(%rcx)
2006 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%rcx)
2007 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 736(%rcx)
2008 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 672(%rcx)
2009 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 640(%rcx)
2010 ; AVX1-ONLY-NEXT: vmovaps %ymm10, 576(%rcx)
2011 ; AVX1-ONLY-NEXT: vmovaps %ymm15, 544(%rcx)
2012 ; AVX1-ONLY-NEXT: vmovaps %ymm14, 480(%rcx)
2013 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2014 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rcx)
2015 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2016 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rcx)
2017 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2018 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rcx)
2019 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2020 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rcx)
2021 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
2022 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rcx)
2023 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2024 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rcx)
2025 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2026 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rcx)
2027 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2028 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rcx)
2029 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2030 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rcx)
2031 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2032 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx)
2033 ; AVX1-ONLY-NEXT: addq $200, %rsp
2034 ; AVX1-ONLY-NEXT: vzeroupper
2035 ; AVX1-ONLY-NEXT: retq
2037 ; AVX2-SLOW-LABEL: store_i32_stride3_vf64:
2038 ; AVX2-SLOW: # %bb.0:
2039 ; AVX2-SLOW-NEXT: subq $712, %rsp # imm = 0x2C8
2040 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm15
2041 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm8
2042 ; AVX2-SLOW-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2043 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm13
2044 ; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm5
2045 ; AVX2-SLOW-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2046 ; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm3
2047 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2048 ; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %ymm2
2049 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2050 ; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm7
2051 ; AVX2-SLOW-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2052 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm6
2053 ; AVX2-SLOW-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2054 ; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %ymm4
2055 ; AVX2-SLOW-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2056 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2057 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2058 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm15[0,0,2,1]
2059 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2060 ; AVX2-SLOW-NEXT: vbroadcastsd (%rdx), %ymm1
2061 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2062 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2063 ; AVX2-SLOW-NEXT: vbroadcastsd 24(%rdi), %ymm0
2064 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm5[1,2,3,3,5,6,7,7]
2065 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,3]
2066 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
2067 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm7[2,1,3,3]
2068 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2069 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2070 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2071 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2072 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm8[0,0,2,1]
2073 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2074 ; AVX2-SLOW-NEXT: vbroadcastsd 32(%rdx), %ymm1
2075 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2076 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2077 ; AVX2-SLOW-NEXT: vbroadcastsd 56(%rdi), %ymm0
2078 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm3[1,2,3,3,5,6,7,7]
2079 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,3]
2080 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
2081 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm6[2,1,3,3]
2082 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2083 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2084 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2085 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2086 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm13[0,0,2,1]
2087 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2088 ; AVX2-SLOW-NEXT: vbroadcastsd 64(%rdx), %ymm1
2089 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2090 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2091 ; AVX2-SLOW-NEXT: vbroadcastsd 88(%rdi), %ymm0
2092 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,2,3,3,5,6,7,7]
2093 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,3]
2094 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
2095 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm4[2,1,3,3]
2096 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2097 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2098 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2099 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2100 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm1
2101 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2102 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
2103 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2104 ; AVX2-SLOW-NEXT: vbroadcastsd 96(%rdx), %ymm1
2105 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2106 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2107 ; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %ymm14
2108 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm14[1,2,3,3,5,6,7,7]
2109 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2110 ; AVX2-SLOW-NEXT: vbroadcastsd 120(%rdi), %ymm1
2111 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2112 ; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %ymm1
2113 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2114 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
2115 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2116 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2117 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2118 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2119 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm1
2120 ; AVX2-SLOW-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
2121 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
2122 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2123 ; AVX2-SLOW-NEXT: vbroadcastsd 128(%rdx), %ymm1
2124 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2125 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2126 ; AVX2-SLOW-NEXT: vmovaps 128(%rsi), %ymm12
2127 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm12[1,2,3,3,5,6,7,7]
2128 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2129 ; AVX2-SLOW-NEXT: vbroadcastsd 152(%rdi), %ymm1
2130 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2131 ; AVX2-SLOW-NEXT: vmovaps 128(%rdx), %ymm10
2132 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm10[2,1,3,3]
2133 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2134 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2135 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2136 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2137 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm9
2138 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm9[0,0,2,1]
2139 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2140 ; AVX2-SLOW-NEXT: vbroadcastsd 160(%rdx), %ymm1
2141 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2142 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2143 ; AVX2-SLOW-NEXT: vmovaps 160(%rsi), %ymm7
2144 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,2,3,3,5,6,7,7]
2145 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2146 ; AVX2-SLOW-NEXT: vbroadcastsd 184(%rdi), %ymm1
2147 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2148 ; AVX2-SLOW-NEXT: vmovaps 160(%rdx), %ymm6
2149 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm6[2,1,3,3]
2150 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2151 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2152 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2153 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2154 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %ymm5
2155 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm5[0,0,2,1]
2156 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2157 ; AVX2-SLOW-NEXT: vbroadcastsd 192(%rdx), %ymm1
2158 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2159 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2160 ; AVX2-SLOW-NEXT: vmovaps 192(%rsi), %ymm4
2161 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm4[1,2,3,3,5,6,7,7]
2162 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2163 ; AVX2-SLOW-NEXT: vbroadcastsd 216(%rdi), %ymm1
2164 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2165 ; AVX2-SLOW-NEXT: vmovaps 192(%rdx), %ymm3
2166 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[2,1,3,3]
2167 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2168 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2169 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2170 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2171 ; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %ymm2
2172 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[0,0,2,1]
2173 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2174 ; AVX2-SLOW-NEXT: vbroadcastsd 224(%rdx), %ymm1
2175 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2176 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2177 ; AVX2-SLOW-NEXT: vmovaps 224(%rsi), %ymm1
2178 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,2,3,3,5,6,7,7]
2179 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2180 ; AVX2-SLOW-NEXT: vbroadcastsd 248(%rdi), %ymm8
2181 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1],ymm8[2],ymm0[3,4],ymm8[5],ymm0[6,7]
2182 ; AVX2-SLOW-NEXT: vmovaps 224(%rdx), %ymm0
2183 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm0[2,1,3,3]
2184 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm11[1],ymm8[2,3],ymm11[4],ymm8[5,6],ymm11[7]
2185 ; AVX2-SLOW-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2186 ; AVX2-SLOW-NEXT: vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
2187 ; AVX2-SLOW-NEXT: # ymm8 = mem[0,0,3,3,4,4,7,7]
2188 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm15[1,1,2,2]
2189 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm8[2],ymm11[3,4],ymm8[5],ymm11[6,7]
2190 ; AVX2-SLOW-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
2191 ; AVX2-SLOW-NEXT: # ymm11 = mem[1,1,2,2]
2192 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm8[1,2],ymm11[3],ymm8[4,5],ymm11[6],ymm8[7]
2193 ; AVX2-SLOW-NEXT: vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
2194 ; AVX2-SLOW-NEXT: # ymm8 = mem[0,0,3,3,4,4,7,7]
2195 ; AVX2-SLOW-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2196 ; AVX2-SLOW-NEXT: # ymm15 = mem[1,1,2,2]
2197 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm15[0,1],ymm8[2],ymm15[3,4],ymm8[5],ymm15[6,7]
2198 ; AVX2-SLOW-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2199 ; AVX2-SLOW-NEXT: # ymm15 = mem[1,1,2,2]
2200 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm15[0],ymm8[1,2],ymm15[3],ymm8[4,5],ymm15[6],ymm8[7]
2201 ; AVX2-SLOW-NEXT: vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2202 ; AVX2-SLOW-NEXT: # ymm15 = mem[0,0,3,3,4,4,7,7]
2203 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[1,1,2,2]
2204 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2],ymm13[3,4],ymm15[5],ymm13[6,7]
2205 ; AVX2-SLOW-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2206 ; AVX2-SLOW-NEXT: # ymm15 = mem[1,1,2,2]
2207 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0],ymm13[1,2],ymm15[3],ymm13[4,5],ymm15[6],ymm13[7]
2208 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm15 = ymm14[0,0,3,3,4,4,7,7]
2209 ; AVX2-SLOW-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
2210 ; AVX2-SLOW-NEXT: # ymm14 = mem[1,1,2,2]
2211 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7]
2212 ; AVX2-SLOW-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2213 ; AVX2-SLOW-NEXT: # ymm15 = mem[1,1,2,2]
2214 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1,2],ymm15[3],ymm14[4,5],ymm15[6],ymm14[7]
2215 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,0,3,3,4,4,7,7]
2216 ; AVX2-SLOW-NEXT: vpermpd $165, (%rsp), %ymm15 # 32-byte Folded Reload
2217 ; AVX2-SLOW-NEXT: # ymm15 = mem[1,1,2,2]
2218 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm12 = ymm15[0,1],ymm12[2],ymm15[3,4],ymm12[5],ymm15[6,7]
2219 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[1,1,2,2]
2220 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm12[1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7]
2221 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,0,3,3,4,4,7,7]
2222 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[1,1,2,2]
2223 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm7[2],ymm9[3,4],ymm7[5],ymm9[6,7]
2224 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2]
2225 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1,2],ymm6[3],ymm7[4,5],ymm6[6],ymm7[7]
2226 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,0,3,3,4,4,7,7]
2227 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[1,1,2,2]
2228 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
2229 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[1,1,2,2]
2230 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
2231 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
2232 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
2233 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
2234 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
2235 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7]
2236 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 704(%rcx)
2237 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 608(%rcx)
2238 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 512(%rcx)
2239 ; AVX2-SLOW-NEXT: vmovaps %ymm10, 416(%rcx)
2240 ; AVX2-SLOW-NEXT: vmovaps %ymm14, 320(%rcx)
2241 ; AVX2-SLOW-NEXT: vmovaps %ymm13, 224(%rcx)
2242 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 128(%rcx)
2243 ; AVX2-SLOW-NEXT: vmovaps %ymm11, 32(%rcx)
2244 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2245 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 736(%rcx)
2246 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2247 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 672(%rcx)
2248 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2249 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 640(%rcx)
2250 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2251 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 576(%rcx)
2252 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2253 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 544(%rcx)
2254 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2255 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 480(%rcx)
2256 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2257 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 448(%rcx)
2258 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2259 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 384(%rcx)
2260 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2261 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 352(%rcx)
2262 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2263 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 288(%rcx)
2264 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2265 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rcx)
2266 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2267 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 192(%rcx)
2268 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2269 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rcx)
2270 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2271 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 96(%rcx)
2272 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2273 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rcx)
2274 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2275 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rcx)
2276 ; AVX2-SLOW-NEXT: addq $712, %rsp # imm = 0x2C8
2277 ; AVX2-SLOW-NEXT: vzeroupper
2278 ; AVX2-SLOW-NEXT: retq
2280 ; AVX2-FAST-LABEL: store_i32_stride3_vf64:
2281 ; AVX2-FAST: # %bb.0:
2282 ; AVX2-FAST-NEXT: subq $232, %rsp
2283 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm6
2284 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm3
2285 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm1
2286 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm12
2287 ; AVX2-FAST-NEXT: vmovaps (%rsi), %ymm8
2288 ; AVX2-FAST-NEXT: vmovaps 32(%rsi), %ymm5
2289 ; AVX2-FAST-NEXT: vmovaps 64(%rsi), %ymm10
2290 ; AVX2-FAST-NEXT: vmovaps 96(%rsi), %ymm0
2291 ; AVX2-FAST-NEXT: vmovaps (%rdx), %ymm9
2292 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm4
2293 ; AVX2-FAST-NEXT: vmovaps 64(%rdx), %ymm2
2294 ; AVX2-FAST-NEXT: vmovaps 96(%rdx), %ymm13
2295 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm7 = [1,0,2,2,1,0,2,2]
2296 ; AVX2-FAST-NEXT: # ymm7 = mem[0,1,0,1]
2297 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm7, %ymm11
2298 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm14 = ymm6[0,0,2,1]
2299 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm14[0],ymm11[1],ymm14[2,3],ymm11[4],ymm14[5,6],ymm11[7]
2300 ; AVX2-FAST-NEXT: vbroadcastsd (%rdx), %ymm14
2301 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm14[2],ymm11[3,4],ymm14[5],ymm11[6,7]
2302 ; AVX2-FAST-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2303 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm11 = ymm8[0,0,3,3,4,4,7,7]
2304 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2]
2305 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm11[2],ymm6[3,4],ymm11[5],ymm6[6,7]
2306 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm9[1,1,2,2]
2307 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm11[0],ymm6[1,2],ymm11[3],ymm6[4,5],ymm11[6],ymm6[7]
2308 ; AVX2-FAST-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2309 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm6 = [5,6,5,6,5,6,7,7]
2310 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm6, %ymm8
2311 ; AVX2-FAST-NEXT: vbroadcastsd 24(%rdi), %ymm11
2312 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm11[2],ymm8[3,4],ymm11[5],ymm8[6,7]
2313 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,3,3]
2314 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7]
2315 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2316 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm7, %ymm8
2317 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm3[0,0,2,1]
2318 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
2319 ; AVX2-FAST-NEXT: vbroadcastsd 32(%rdx), %ymm9
2320 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
2321 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2322 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm8 = ymm5[0,0,3,3,4,4,7,7]
2323 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[1,1,2,2]
2324 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm8[2],ymm3[3,4],ymm8[5],ymm3[6,7]
2325 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm8 = ymm4[1,1,2,2]
2326 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0],ymm3[1,2],ymm8[3],ymm3[4,5],ymm8[6],ymm3[7]
2327 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2328 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm6, %ymm3
2329 ; AVX2-FAST-NEXT: vbroadcastsd 56(%rdi), %ymm5
2330 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7]
2331 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
2332 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
2333 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2334 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm7, %ymm3
2335 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm1[0,0,2,1]
2336 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
2337 ; AVX2-FAST-NEXT: vbroadcastsd 64(%rdx), %ymm4
2338 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
2339 ; AVX2-FAST-NEXT: vmovups %ymm3, (%rsp) # 32-byte Spill
2340 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm3 = ymm10[0,0,3,3,4,4,7,7]
2341 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
2342 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
2343 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm2[1,1,2,2]
2344 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7]
2345 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2346 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm6, %ymm1
2347 ; AVX2-FAST-NEXT: vbroadcastsd 88(%rdi), %ymm3
2348 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
2349 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
2350 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
2351 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2352 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm7, %ymm1
2353 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm12[0,0,2,1]
2354 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
2355 ; AVX2-FAST-NEXT: vbroadcastsd 96(%rdx), %ymm2
2356 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
2357 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2358 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm1 = ymm0[0,0,3,3,4,4,7,7]
2359 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm12[1,1,2,2]
2360 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
2361 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm13[1,1,2,2]
2362 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
2363 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2364 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm6, %ymm0
2365 ; AVX2-FAST-NEXT: vbroadcastsd 120(%rdi), %ymm1
2366 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2367 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm1
2368 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm13[2,1,3,3]
2369 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm13 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7]
2370 ; AVX2-FAST-NEXT: vmovaps 128(%rsi), %ymm0
2371 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm7, %ymm2
2372 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm1[0,0,2,1]
2373 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
2374 ; AVX2-FAST-NEXT: vbroadcastsd 128(%rdx), %ymm3
2375 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
2376 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
2377 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,0,3,3,4,4,7,7]
2378 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
2379 ; AVX2-FAST-NEXT: vmovaps 128(%rdx), %ymm2
2380 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm2[1,1,2,2]
2381 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm15 = ymm3[0],ymm1[1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7]
2382 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm6, %ymm0
2383 ; AVX2-FAST-NEXT: vbroadcastsd 152(%rdi), %ymm1
2384 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2385 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[2,1,3,3]
2386 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2387 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm0
2388 ; AVX2-FAST-NEXT: vmovaps 160(%rsi), %ymm1
2389 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm7, %ymm2
2390 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[0,0,2,1]
2391 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
2392 ; AVX2-FAST-NEXT: vbroadcastsd 160(%rdx), %ymm3
2393 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
2394 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
2395 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm1[0,0,3,3,4,4,7,7]
2396 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
2397 ; AVX2-FAST-NEXT: vmovaps 160(%rdx), %ymm0
2398 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[1,1,2,2]
2399 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0],ymm2[1,2],ymm5[3],ymm2[4,5],ymm5[6],ymm2[7]
2400 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm6, %ymm1
2401 ; AVX2-FAST-NEXT: vbroadcastsd 184(%rdi), %ymm5
2402 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7]
2403 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
2404 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2405 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %ymm5
2406 ; AVX2-FAST-NEXT: vmovaps 192(%rsi), %ymm0
2407 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm7, %ymm1
2408 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm5[0,0,2,1]
2409 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0],ymm1[1],ymm9[2,3],ymm1[4],ymm9[5,6],ymm1[7]
2410 ; AVX2-FAST-NEXT: vbroadcastsd 192(%rdx), %ymm9
2411 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm9[2],ymm1[3,4],ymm9[5],ymm1[6,7]
2412 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[1,1,2,2]
2413 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm0[0,0,3,3,4,4,7,7]
2414 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm9[2],ymm5[3,4],ymm9[5],ymm5[6,7]
2415 ; AVX2-FAST-NEXT: vmovaps 192(%rdx), %ymm9
2416 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm9[1,1,2,2]
2417 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm10[0],ymm5[1,2],ymm10[3],ymm5[4,5],ymm10[6],ymm5[7]
2418 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm6, %ymm0
2419 ; AVX2-FAST-NEXT: vbroadcastsd 216(%rdi), %ymm10
2420 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm10[2],ymm0[3,4],ymm10[5],ymm0[6,7]
2421 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,3,3]
2422 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm9[1],ymm0[2,3],ymm9[4],ymm0[5,6],ymm9[7]
2423 ; AVX2-FAST-NEXT: vmovaps 224(%rsi), %ymm9
2424 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm7, %ymm7
2425 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %ymm10
2426 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm10[0,0,2,1]
2427 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2,3],ymm7[4],ymm11[5,6],ymm7[7]
2428 ; AVX2-FAST-NEXT: vbroadcastsd 224(%rdx), %ymm11
2429 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm11[2],ymm7[3,4],ymm11[5],ymm7[6,7]
2430 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[1,1,2,2]
2431 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm11 = ymm9[0,0,3,3,4,4,7,7]
2432 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7]
2433 ; AVX2-FAST-NEXT: vmovaps 224(%rdx), %ymm11
2434 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm12 = ymm11[1,1,2,2]
2435 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0],ymm10[1,2],ymm12[3],ymm10[4,5],ymm12[6],ymm10[7]
2436 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm6, %ymm6
2437 ; AVX2-FAST-NEXT: vbroadcastsd 248(%rdi), %ymm9
2438 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm9[2],ymm6[3,4],ymm9[5],ymm6[6,7]
2439 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm11[2,1,3,3]
2440 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm9[1],ymm6[2,3],ymm9[4],ymm6[5,6],ymm9[7]
2441 ; AVX2-FAST-NEXT: vmovaps %ymm6, 736(%rcx)
2442 ; AVX2-FAST-NEXT: vmovaps %ymm10, 704(%rcx)
2443 ; AVX2-FAST-NEXT: vmovaps %ymm7, 672(%rcx)
2444 ; AVX2-FAST-NEXT: vmovaps %ymm0, 640(%rcx)
2445 ; AVX2-FAST-NEXT: vmovaps %ymm5, 608(%rcx)
2446 ; AVX2-FAST-NEXT: vmovaps %ymm1, 576(%rcx)
2447 ; AVX2-FAST-NEXT: vmovaps %ymm2, 544(%rcx)
2448 ; AVX2-FAST-NEXT: vmovaps %ymm3, 512(%rcx)
2449 ; AVX2-FAST-NEXT: vmovaps %ymm4, 480(%rcx)
2450 ; AVX2-FAST-NEXT: vmovaps %ymm8, 448(%rcx)
2451 ; AVX2-FAST-NEXT: vmovaps %ymm15, 416(%rcx)
2452 ; AVX2-FAST-NEXT: vmovaps %ymm14, 384(%rcx)
2453 ; AVX2-FAST-NEXT: vmovaps %ymm13, 352(%rcx)
2454 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2455 ; AVX2-FAST-NEXT: vmovaps %ymm0, 320(%rcx)
2456 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2457 ; AVX2-FAST-NEXT: vmovaps %ymm0, 288(%rcx)
2458 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2459 ; AVX2-FAST-NEXT: vmovaps %ymm0, 256(%rcx)
2460 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2461 ; AVX2-FAST-NEXT: vmovaps %ymm0, 224(%rcx)
2462 ; AVX2-FAST-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
2463 ; AVX2-FAST-NEXT: vmovaps %ymm0, 192(%rcx)
2464 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2465 ; AVX2-FAST-NEXT: vmovaps %ymm0, 160(%rcx)
2466 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2467 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rcx)
2468 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2469 ; AVX2-FAST-NEXT: vmovaps %ymm0, 96(%rcx)
2470 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2471 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rcx)
2472 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2473 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rcx)
2474 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2475 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rcx)
2476 ; AVX2-FAST-NEXT: addq $232, %rsp
2477 ; AVX2-FAST-NEXT: vzeroupper
2478 ; AVX2-FAST-NEXT: retq
2480 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride3_vf64:
2481 ; AVX2-FAST-PERLANE: # %bb.0:
2482 ; AVX2-FAST-PERLANE-NEXT: subq $712, %rsp # imm = 0x2C8
2483 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm15
2484 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm8
2485 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2486 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm13
2487 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm5
2488 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2489 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm3
2490 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2491 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %ymm2
2492 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2493 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm7
2494 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2495 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm6
2496 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2497 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %ymm4
2498 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2499 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2500 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2501 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm15[0,0,2,1]
2502 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2503 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rdx), %ymm1
2504 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2505 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2506 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rdi), %ymm0
2507 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm5[1,2,3,3,5,6,7,7]
2508 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,3]
2509 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
2510 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm7[2,1,3,3]
2511 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2512 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2513 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2514 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2515 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm8[0,0,2,1]
2516 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2517 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 32(%rdx), %ymm1
2518 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2519 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2520 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 56(%rdi), %ymm0
2521 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm3[1,2,3,3,5,6,7,7]
2522 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,3]
2523 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
2524 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm6[2,1,3,3]
2525 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2526 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2527 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2528 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2529 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm13[0,0,2,1]
2530 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2531 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 64(%rdx), %ymm1
2532 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2533 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2534 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 88(%rdi), %ymm0
2535 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,2,3,3,5,6,7,7]
2536 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,3]
2537 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
2538 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm4[2,1,3,3]
2539 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2540 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2541 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2542 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2543 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm1
2544 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2545 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
2546 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2547 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 96(%rdx), %ymm1
2548 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2549 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2550 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %ymm14
2551 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm14[1,2,3,3,5,6,7,7]
2552 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2553 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 120(%rdi), %ymm1
2554 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2555 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %ymm1
2556 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2557 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
2558 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2559 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2560 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2561 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2562 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm1
2563 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
2564 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
2565 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2566 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 128(%rdx), %ymm1
2567 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2568 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2569 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rsi), %ymm12
2570 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm12[1,2,3,3,5,6,7,7]
2571 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2572 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 152(%rdi), %ymm1
2573 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2574 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdx), %ymm10
2575 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm10[2,1,3,3]
2576 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2577 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2578 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2579 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2580 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm9
2581 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm9[0,0,2,1]
2582 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2583 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 160(%rdx), %ymm1
2584 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2585 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2586 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rsi), %ymm7
2587 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,2,3,3,5,6,7,7]
2588 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2589 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 184(%rdi), %ymm1
2590 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2591 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdx), %ymm6
2592 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm6[2,1,3,3]
2593 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2594 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2595 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2596 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2597 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %ymm5
2598 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm5[0,0,2,1]
2599 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2600 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 192(%rdx), %ymm1
2601 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2602 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2603 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rsi), %ymm4
2604 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm4[1,2,3,3,5,6,7,7]
2605 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2606 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 216(%rdi), %ymm1
2607 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2608 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdx), %ymm3
2609 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[2,1,3,3]
2610 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
2611 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2612 ; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,2,2]
2613 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
2614 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %ymm2
2615 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[0,0,2,1]
2616 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2617 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 224(%rdx), %ymm1
2618 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2619 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2620 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rsi), %ymm1
2621 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,2,3,3,5,6,7,7]
2622 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
2623 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 248(%rdi), %ymm8
2624 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1],ymm8[2],ymm0[3,4],ymm8[5],ymm0[6,7]
2625 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdx), %ymm0
2626 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm0[2,1,3,3]
2627 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm11[1],ymm8[2,3],ymm11[4],ymm8[5,6],ymm11[7]
2628 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2629 ; AVX2-FAST-PERLANE-NEXT: vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
2630 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0,0,3,3,4,4,7,7]
2631 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm15[1,1,2,2]
2632 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm8[2],ymm11[3,4],ymm8[5],ymm11[6,7]
2633 ; AVX2-FAST-PERLANE-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
2634 ; AVX2-FAST-PERLANE-NEXT: # ymm11 = mem[1,1,2,2]
2635 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0],ymm8[1,2],ymm11[3],ymm8[4,5],ymm11[6],ymm8[7]
2636 ; AVX2-FAST-PERLANE-NEXT: vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
2637 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0,0,3,3,4,4,7,7]
2638 ; AVX2-FAST-PERLANE-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2639 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[1,1,2,2]
2640 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm15[0,1],ymm8[2],ymm15[3,4],ymm8[5],ymm15[6,7]
2641 ; AVX2-FAST-PERLANE-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2642 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[1,1,2,2]
2643 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm15[0],ymm8[1,2],ymm15[3],ymm8[4,5],ymm15[6],ymm8[7]
2644 ; AVX2-FAST-PERLANE-NEXT: vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2645 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[0,0,3,3,4,4,7,7]
2646 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[1,1,2,2]
2647 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2],ymm13[3,4],ymm15[5],ymm13[6,7]
2648 ; AVX2-FAST-PERLANE-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2649 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[1,1,2,2]
2650 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0],ymm13[1,2],ymm15[3],ymm13[4,5],ymm15[6],ymm13[7]
2651 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm15 = ymm14[0,0,3,3,4,4,7,7]
2652 ; AVX2-FAST-PERLANE-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
2653 ; AVX2-FAST-PERLANE-NEXT: # ymm14 = mem[1,1,2,2]
2654 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7]
2655 ; AVX2-FAST-PERLANE-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
2656 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[1,1,2,2]
2657 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1,2],ymm15[3],ymm14[4,5],ymm15[6],ymm14[7]
2658 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,0,3,3,4,4,7,7]
2659 ; AVX2-FAST-PERLANE-NEXT: vpermpd $165, (%rsp), %ymm15 # 32-byte Folded Reload
2660 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[1,1,2,2]
2661 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm12 = ymm15[0,1],ymm12[2],ymm15[3,4],ymm12[5],ymm15[6,7]
2662 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[1,1,2,2]
2663 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0],ymm12[1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7]
2664 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,0,3,3,4,4,7,7]
2665 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[1,1,2,2]
2666 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm7[2],ymm9[3,4],ymm7[5],ymm9[6,7]
2667 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2]
2668 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1,2],ymm6[3],ymm7[4,5],ymm6[6],ymm7[7]
2669 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,0,3,3,4,4,7,7]
2670 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[1,1,2,2]
2671 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
2672 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[1,1,2,2]
2673 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
2674 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
2675 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
2676 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
2677 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
2678 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7]
2679 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 704(%rcx)
2680 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 608(%rcx)
2681 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 512(%rcx)
2682 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm10, 416(%rcx)
2683 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm14, 320(%rcx)
2684 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm13, 224(%rcx)
2685 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 128(%rcx)
2686 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, 32(%rcx)
2687 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2688 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 736(%rcx)
2689 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2690 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 672(%rcx)
2691 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2692 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 640(%rcx)
2693 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2694 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 576(%rcx)
2695 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2696 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 544(%rcx)
2697 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2698 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 480(%rcx)
2699 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2700 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 448(%rcx)
2701 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2702 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 384(%rcx)
2703 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2704 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 352(%rcx)
2705 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2706 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 288(%rcx)
2707 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2708 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rcx)
2709 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2710 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 192(%rcx)
2711 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2712 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rcx)
2713 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2714 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 96(%rcx)
2715 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2716 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rcx)
2717 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2718 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rcx)
2719 ; AVX2-FAST-PERLANE-NEXT: addq $712, %rsp # imm = 0x2C8
2720 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
2721 ; AVX2-FAST-PERLANE-NEXT: retq
2723 ; AVX512-LABEL: store_i32_stride3_vf64:
2725 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm3
2726 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm2
2727 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm1
2728 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm0
2729 ; AVX512-NEXT: vmovdqa64 (%rsi), %zmm4
2730 ; AVX512-NEXT: vmovdqa64 64(%rsi), %zmm5
2731 ; AVX512-NEXT: vmovdqa64 128(%rsi), %zmm6
2732 ; AVX512-NEXT: vmovdqa64 192(%rsi), %zmm7
2733 ; AVX512-NEXT: vmovdqa64 (%rdx), %zmm8
2734 ; AVX512-NEXT: vmovdqa64 64(%rdx), %zmm9
2735 ; AVX512-NEXT: vmovdqa64 128(%rdx), %zmm10
2736 ; AVX512-NEXT: vmovdqa64 192(%rdx), %zmm11
2737 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = <0,16,u,1,17,u,2,18,u,3,19,u,4,20,u,5>
2738 ; AVX512-NEXT: vmovdqa64 %zmm3, %zmm13
2739 ; AVX512-NEXT: vpermt2d %zmm4, %zmm12, %zmm13
2740 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15]
2741 ; AVX512-NEXT: vpermt2d %zmm8, %zmm14, %zmm13
2742 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm15 = <u,11,27,u,12,28,u,13,29,u,14,30,u,15,31,u>
2743 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm16
2744 ; AVX512-NEXT: vpermt2d %zmm7, %zmm15, %zmm16
2745 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm17 = [26,1,2,27,4,5,28,7,8,29,10,11,30,13,14,31]
2746 ; AVX512-NEXT: vpermt2d %zmm11, %zmm17, %zmm16
2747 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = <5,u,22,6,u,23,7,u,24,8,u,25,9,u,26,10>
2748 ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm19
2749 ; AVX512-NEXT: vpermt2d %zmm0, %zmm18, %zmm19
2750 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm20 = [0,21,2,3,22,5,6,23,8,9,24,11,12,25,14,15]
2751 ; AVX512-NEXT: vpermt2d %zmm11, %zmm20, %zmm19
2752 ; AVX512-NEXT: vpermt2d %zmm7, %zmm12, %zmm0
2753 ; AVX512-NEXT: vpermt2d %zmm11, %zmm14, %zmm0
2754 ; AVX512-NEXT: vmovdqa64 %zmm1, %zmm7
2755 ; AVX512-NEXT: vpermt2d %zmm6, %zmm15, %zmm7
2756 ; AVX512-NEXT: vpermt2d %zmm10, %zmm17, %zmm7
2757 ; AVX512-NEXT: vmovdqa64 %zmm6, %zmm11
2758 ; AVX512-NEXT: vpermt2d %zmm1, %zmm18, %zmm11
2759 ; AVX512-NEXT: vpermt2d %zmm10, %zmm20, %zmm11
2760 ; AVX512-NEXT: vpermt2d %zmm6, %zmm12, %zmm1
2761 ; AVX512-NEXT: vpermt2d %zmm10, %zmm14, %zmm1
2762 ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm6
2763 ; AVX512-NEXT: vpermt2d %zmm5, %zmm15, %zmm6
2764 ; AVX512-NEXT: vpermt2d %zmm9, %zmm17, %zmm6
2765 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm10
2766 ; AVX512-NEXT: vpermt2d %zmm2, %zmm18, %zmm10
2767 ; AVX512-NEXT: vpermt2d %zmm9, %zmm20, %zmm10
2768 ; AVX512-NEXT: vpermt2d %zmm5, %zmm12, %zmm2
2769 ; AVX512-NEXT: vpermt2d %zmm9, %zmm14, %zmm2
2770 ; AVX512-NEXT: vpermi2d %zmm4, %zmm3, %zmm15
2771 ; AVX512-NEXT: vpermt2d %zmm8, %zmm17, %zmm15
2772 ; AVX512-NEXT: vpermt2d %zmm3, %zmm18, %zmm4
2773 ; AVX512-NEXT: vpermt2d %zmm8, %zmm20, %zmm4
2774 ; AVX512-NEXT: vmovdqa64 %zmm4, 64(%rcx)
2775 ; AVX512-NEXT: vmovdqa64 %zmm15, 128(%rcx)
2776 ; AVX512-NEXT: vmovdqa64 %zmm2, 192(%rcx)
2777 ; AVX512-NEXT: vmovdqa64 %zmm10, 256(%rcx)
2778 ; AVX512-NEXT: vmovdqa64 %zmm6, 320(%rcx)
2779 ; AVX512-NEXT: vmovdqa64 %zmm1, 384(%rcx)
2780 ; AVX512-NEXT: vmovdqa64 %zmm11, 448(%rcx)
2781 ; AVX512-NEXT: vmovdqa64 %zmm7, 512(%rcx)
2782 ; AVX512-NEXT: vmovdqa64 %zmm0, 576(%rcx)
2783 ; AVX512-NEXT: vmovdqa64 %zmm19, 640(%rcx)
2784 ; AVX512-NEXT: vmovdqa64 %zmm16, 704(%rcx)
2785 ; AVX512-NEXT: vmovdqa64 %zmm13, (%rcx)
2786 ; AVX512-NEXT: vzeroupper
2788 %in.vec0 = load <64 x i32>, ptr %in.vecptr0, align 64
2789 %in.vec1 = load <64 x i32>, ptr %in.vecptr1, align 64
2790 %in.vec2 = load <64 x i32>, ptr %in.vecptr2, align 64
2791 %1 = shufflevector <64 x i32> %in.vec0, <64 x i32> %in.vec1, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
2792 %2 = shufflevector <64 x i32> %in.vec2, <64 x i32> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2793 %3 = shufflevector <128 x i32> %1, <128 x i32> %2, <192 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191>
2794 %interleaved.vec = shufflevector <192 x i32> %3, <192 x i32> poison, <192 x i32> <i32 0, i32 64, i32 128, i32 1, i32 65, i32 129, i32 2, i32 66, i32 130, i32 3, i32 67, i32 131, i32 4, i32 68, i32 132, i32 5, i32 69, i32 133, i32 6, i32 70, i32 134, i32 7, i32 71, i32 135, i32 8, i32 72, i32 136, i32 9, i32 73, i32 137, i32 10, i32 74, i32 138, i32 11, i32 75, i32 139, i32 12, i32 76, i32 140, i32 13, i32 77, i32 141, i32 14, i32 78, i32 142, i32 15, i32 79, i32 143, i32 16, i32 80, i32 144, i32 17, i32 81, i32 145, i32 18, i32 82, i32 146, i32 19, i32 83, i32 147, i32 20, i32 84, i32 148, i32 21, i32 85, i32 149, i32 22, i32 86, i32 150, i32 23, i32 87, i32 151, i32 24, i32 88, i32 152, i32 25, i32 89, i32 153, i32 26, i32 90, i32 154, i32 27, i32 91, i32 155, i32 28, i32 92, i32 156, i32 29, i32 93, i32 157, i32 30, i32 94, i32 158, i32 31, i32 95, i32 159, i32 32, i32 96, i32 160, i32 33, i32 97, i32 161, i32 34, i32 98, i32 162, i32 35, i32 99, i32 163, i32 36, i32 100, i32 164, i32 37, i32 101, i32 165, i32 38, i32 102, i32 166, i32 39, i32 103, i32 167, i32 40, i32 104, i32 168, i32 41, i32 105, i32 169, i32 42, i32 106, i32 170, i32 43, i32 107, i32 171, i32 44, i32 108, i32 172, i32 45, i32 109, i32 173, i32 46, i32 110, i32 174, i32 47, i32 111, i32 175, i32 48, i32 112, i32 176, i32 49, i32 113, i32 177, i32 50, i32 114, i32 178, i32 51, i32 115, i32 179, i32 52, i32 116, i32 180, i32 53, i32 117, i32 181, i32 54, i32 118, i32 182, i32 55, i32 119, i32 183, i32 56, i32 120, i32 184, i32 57, i32 121, i32 185, i32 58, i32 122, i32 186, i32 59, i32 123, i32 187, i32 60, i32 124, i32 188, i32 61, i32 125, i32 189, i32 62, i32 126, i32 190, i32 63, i32 127, i32 191>
2795 store <192 x i32> %interleaved.vec, ptr %out.vec, align 64
2798 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
2802 ; AVX512-FAST: {{.*}}
2803 ; AVX512-SLOW: {{.*}}
2805 ; AVX512BW-FAST: {{.*}}
2806 ; AVX512BW-ONLY-FAST: {{.*}}
2807 ; AVX512BW-ONLY-SLOW: {{.*}}
2808 ; AVX512BW-SLOW: {{.*}}
2809 ; AVX512DQ-FAST: {{.*}}
2810 ; AVX512DQ-SLOW: {{.*}}
2811 ; AVX512DQBW-FAST: {{.*}}
2812 ; AVX512DQBW-SLOW: {{.*}}
2814 ; AVX512F-FAST: {{.*}}
2815 ; AVX512F-ONLY-FAST: {{.*}}
2816 ; AVX512F-ONLY-SLOW: {{.*}}
2817 ; AVX512F-SLOW: {{.*}}
2820 ; FALLBACK10: {{.*}}
2821 ; FALLBACK11: {{.*}}
2822 ; FALLBACK12: {{.*}}