1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by LoopVectorizer for interleaved loads.
18 define void @load_i32_stride3_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
19 ; SSE-LABEL: load_i32_stride3_vf2:
21 ; SSE-NEXT: movdqa (%rdi), %xmm0
22 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
23 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
24 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
25 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
26 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
27 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
28 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
29 ; SSE-NEXT: movq %xmm2, (%rsi)
30 ; SSE-NEXT: movq %xmm3, (%rdx)
31 ; SSE-NEXT: movq %xmm0, (%rcx)
34 ; AVX1-ONLY-LABEL: load_i32_stride3_vf2:
36 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm0
37 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
38 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm0[0,3,2,3]
39 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm1[0],xmm0[1],xmm1[2,3]
40 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,0,2,3]
41 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
42 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
43 ; AVX1-ONLY-NEXT: vmovlps %xmm2, (%rsi)
44 ; AVX1-ONLY-NEXT: vmovlps %xmm3, (%rdx)
45 ; AVX1-ONLY-NEXT: vmovlps %xmm0, (%rcx)
46 ; AVX1-ONLY-NEXT: retq
48 ; AVX2-ONLY-LABEL: load_i32_stride3_vf2:
50 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm0
51 ; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
52 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm0[0,3,2,3]
53 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
54 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,0,2,3]
55 ; AVX2-ONLY-NEXT: vbroadcastss 8(%rdi), %xmm3
56 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3]
57 ; AVX2-ONLY-NEXT: vmovlps %xmm2, (%rsi)
58 ; AVX2-ONLY-NEXT: vmovlps %xmm0, (%rdx)
59 ; AVX2-ONLY-NEXT: vmovlps %xmm1, (%rcx)
60 ; AVX2-ONLY-NEXT: retq
62 ; AVX512-SLOW-LABEL: load_i32_stride3_vf2:
63 ; AVX512-SLOW: # %bb.0:
64 ; AVX512-SLOW-NEXT: vmovaps (%rdi), %xmm0
65 ; AVX512-SLOW-NEXT: vmovaps 16(%rdi), %xmm1
66 ; AVX512-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm0[0,3,2,3]
67 ; AVX512-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
68 ; AVX512-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,0,2,3]
69 ; AVX512-SLOW-NEXT: vbroadcastss 8(%rdi), %xmm3
70 ; AVX512-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3]
71 ; AVX512-SLOW-NEXT: vmovlps %xmm2, (%rsi)
72 ; AVX512-SLOW-NEXT: vmovlps %xmm0, (%rdx)
73 ; AVX512-SLOW-NEXT: vmovlps %xmm1, (%rcx)
74 ; AVX512-SLOW-NEXT: retq
76 ; AVX512-FAST-LABEL: load_i32_stride3_vf2:
77 ; AVX512-FAST: # %bb.0:
78 ; AVX512-FAST-NEXT: vmovdqa (%rdi), %xmm0
79 ; AVX512-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
80 ; AVX512-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
81 ; AVX512-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [5,0,5,0]
82 ; AVX512-FAST-NEXT: vpermi2d %xmm0, %xmm1, %xmm3
83 ; AVX512-FAST-NEXT: vpbroadcastd 8(%rdi), %xmm0
84 ; AVX512-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
85 ; AVX512-FAST-NEXT: vmovq %xmm2, (%rsi)
86 ; AVX512-FAST-NEXT: vmovq %xmm3, (%rdx)
87 ; AVX512-FAST-NEXT: vmovq %xmm0, (%rcx)
88 ; AVX512-FAST-NEXT: retq
89 %wide.vec = load <6 x i32>, ptr %in.vec, align 64
90 %strided.vec0 = shufflevector <6 x i32> %wide.vec, <6 x i32> poison, <2 x i32> <i32 0, i32 3>
91 %strided.vec1 = shufflevector <6 x i32> %wide.vec, <6 x i32> poison, <2 x i32> <i32 1, i32 4>
92 %strided.vec2 = shufflevector <6 x i32> %wide.vec, <6 x i32> poison, <2 x i32> <i32 2, i32 5>
93 store <2 x i32> %strided.vec0, ptr %out.vec0, align 64
94 store <2 x i32> %strided.vec1, ptr %out.vec1, align 64
95 store <2 x i32> %strided.vec2, ptr %out.vec2, align 64
99 define void @load_i32_stride3_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
100 ; SSE-LABEL: load_i32_stride3_vf4:
102 ; SSE-NEXT: movdqa (%rdi), %xmm0
103 ; SSE-NEXT: movaps 16(%rdi), %xmm1
104 ; SSE-NEXT: movaps 32(%rdi), %xmm2
105 ; SSE-NEXT: movdqa %xmm0, %xmm3
106 ; SSE-NEXT: movaps %xmm1, %xmm4
107 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
108 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
109 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,1,1]
110 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[1,0]
111 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[0,2]
112 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm2[2,3]
113 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
114 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
115 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[0,3]
116 ; SSE-NEXT: movaps %xmm3, (%rsi)
117 ; SSE-NEXT: movaps %xmm0, (%rdx)
118 ; SSE-NEXT: movaps %xmm5, (%rcx)
121 ; AVX1-ONLY-LABEL: load_i32_stride3_vf4:
122 ; AVX1-ONLY: # %bb.0:
123 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm0
124 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
125 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm2
126 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm0[0],xmm2[1],xmm0[2,3]
127 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
128 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,3,2,1]
129 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm4
130 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = mem[0,1],xmm4[2,3]
131 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm1[0],xmm4[1,2],xmm1[3]
132 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,0,3,2]
133 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
134 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
135 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,1,0,3]
136 ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%rsi)
137 ; AVX1-ONLY-NEXT: vmovaps %xmm4, (%rdx)
138 ; AVX1-ONLY-NEXT: vmovaps %xmm0, (%rcx)
139 ; AVX1-ONLY-NEXT: retq
141 ; AVX2-ONLY-LABEL: load_i32_stride3_vf4:
142 ; AVX2-ONLY: # %bb.0:
143 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0
144 ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
145 ; AVX2-ONLY-NEXT: vmovaps {{.*#+}} xmm2 = [0,3,6,1]
146 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
147 ; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm2, %ymm2
148 ; AVX2-ONLY-NEXT: vmovaps {{.*#+}} xmm3 = [1,4,7,2]
149 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
150 ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm3, %ymm3
151 ; AVX2-ONLY-NEXT: vmovaps {{.*#+}} xmm4 = [2,5,0,3]
152 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4,5,6,7]
153 ; AVX2-ONLY-NEXT: vpermps %ymm0, %ymm4, %ymm0
154 ; AVX2-ONLY-NEXT: vmovaps %xmm2, (%rsi)
155 ; AVX2-ONLY-NEXT: vmovaps %xmm3, (%rdx)
156 ; AVX2-ONLY-NEXT: vmovaps %xmm0, (%rcx)
157 ; AVX2-ONLY-NEXT: vzeroupper
158 ; AVX2-ONLY-NEXT: retq
160 ; AVX512-LABEL: load_i32_stride3_vf4:
162 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [0,3,6,9]
163 ; AVX512-NEXT: vmovdqa (%rdi), %ymm1
164 ; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
165 ; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
166 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [1,4,7,10]
167 ; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
168 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,5,8,11]
169 ; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
170 ; AVX512-NEXT: vmovdqa %xmm0, (%rsi)
171 ; AVX512-NEXT: vmovdqa %xmm3, (%rdx)
172 ; AVX512-NEXT: vmovdqa %xmm4, (%rcx)
173 ; AVX512-NEXT: vzeroupper
175 %wide.vec = load <12 x i32>, ptr %in.vec, align 64
176 %strided.vec0 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
177 %strided.vec1 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
178 %strided.vec2 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
179 store <4 x i32> %strided.vec0, ptr %out.vec0, align 64
180 store <4 x i32> %strided.vec1, ptr %out.vec1, align 64
181 store <4 x i32> %strided.vec2, ptr %out.vec2, align 64
185 define void @load_i32_stride3_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
186 ; SSE-LABEL: load_i32_stride3_vf8:
188 ; SSE-NEXT: movaps 80(%rdi), %xmm1
189 ; SSE-NEXT: movaps 64(%rdi), %xmm5
190 ; SSE-NEXT: movdqa (%rdi), %xmm0
191 ; SSE-NEXT: movaps 16(%rdi), %xmm7
192 ; SSE-NEXT: movaps 32(%rdi), %xmm4
193 ; SSE-NEXT: movdqa 48(%rdi), %xmm2
194 ; SSE-NEXT: movdqa %xmm0, %xmm3
195 ; SSE-NEXT: movaps %xmm7, %xmm8
196 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
197 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm7[0,0]
198 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm7[1,1,1,1]
199 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm4[1,0]
200 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm7[0,2]
201 ; SSE-NEXT: movdqa %xmm2, %xmm7
202 ; SSE-NEXT: movaps %xmm5, %xmm10
203 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm2[2,3,2,3]
204 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm5[0,0]
205 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm5[1,1,1,1]
206 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm1[1,0]
207 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm5[0,2]
208 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,1],xmm1[2,3]
209 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm10[0,2]
210 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,1],xmm4[2,3]
211 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[0,2]
212 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1]
213 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm4[0,3]
214 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
215 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm1[0,3]
216 ; SSE-NEXT: movaps %xmm7, 16(%rsi)
217 ; SSE-NEXT: movaps %xmm3, (%rsi)
218 ; SSE-NEXT: movaps %xmm2, 16(%rdx)
219 ; SSE-NEXT: movaps %xmm0, (%rdx)
220 ; SSE-NEXT: movaps %xmm11, 16(%rcx)
221 ; SSE-NEXT: movaps %xmm6, (%rcx)
224 ; AVX1-ONLY-LABEL: load_i32_stride3_vf8:
225 ; AVX1-ONLY: # %bb.0:
226 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm0
227 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
228 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm2
229 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
230 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm4
231 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,1],ymm1[1,3],ymm4[6,5],ymm1[5,7]
232 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,3],ymm4[0,2],ymm3[4,7],ymm4[4,6]
233 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,0,1]
234 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm0[1,0],ymm4[2,0],ymm0[5,4],ymm4[6,4]
235 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
236 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
237 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm0[2,0],ymm4[3,0],ymm0[6,4],ymm4[7,4]
238 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm4[0,0],ymm5[2,0],ymm4[4,4],ymm5[6,4]
239 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm6
240 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
241 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,2],ymm6[0,3],ymm7[5,6],ymm6[4,7]
242 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,2,3,1,4,6,7,5]
243 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7]
244 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
245 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm6[1,0],ymm2[2,0],ymm6[5,4],ymm2[6,4]
246 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,3],ymm2[6,4],ymm1[4,7]
247 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm4[0,1],ymm0[0,3],ymm4[4,5],ymm0[4,7]
248 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
249 ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rsi)
250 ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rdx)
251 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx)
252 ; AVX1-ONLY-NEXT: vzeroupper
253 ; AVX1-ONLY-NEXT: retq
255 ; AVX2-SLOW-LABEL: load_i32_stride3_vf8:
256 ; AVX2-SLOW: # %bb.0:
257 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0
258 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm1
259 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm2
260 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm3 = [2,5,2,5,2,5,2,5]
261 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm3, %ymm3
262 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
263 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm5 = [0,3,6,1,4,7,u,u]
264 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm5, %ymm4
265 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
266 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,0,3,6,0,0,3,6]
267 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,0,1]
268 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm4, %ymm4
269 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
270 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm6 = [1,4,7,2,5,u,u,u]
271 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm6, %ymm5
272 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
273 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
274 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm1 = [2,5,0,3,6,u,u,u]
275 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm0
276 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1,0,3,4,5,4,7]
277 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
278 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
279 ; AVX2-SLOW-NEXT: vmovaps %ymm3, (%rsi)
280 ; AVX2-SLOW-NEXT: vmovaps %ymm4, (%rdx)
281 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rcx)
282 ; AVX2-SLOW-NEXT: vzeroupper
283 ; AVX2-SLOW-NEXT: retq
285 ; AVX2-FAST-LABEL: load_i32_stride3_vf8:
286 ; AVX2-FAST: # %bb.0:
287 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm0
288 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm1
289 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm2
290 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm3 = [2,5,2,5,2,5,2,5]
291 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm3, %ymm3
292 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
293 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm5 = [0,3,6,1,4,7,u,u]
294 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm5, %ymm4
295 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
296 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,0,3,6,0,0,3,6]
297 ; AVX2-FAST-NEXT: # ymm4 = mem[0,1,0,1]
298 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm4, %ymm4
299 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
300 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm6 = [1,4,7,2,5,u,u,u]
301 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm6, %ymm5
302 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
303 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [0,1,4,7,0,1,4,7]
304 ; AVX2-FAST-NEXT: # ymm5 = mem[0,1,0,1]
305 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm5, %ymm2
306 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
307 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [2,5,0,3,6,u,u,u]
308 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
309 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
310 ; AVX2-FAST-NEXT: vmovaps %ymm3, (%rsi)
311 ; AVX2-FAST-NEXT: vmovaps %ymm4, (%rdx)
312 ; AVX2-FAST-NEXT: vmovaps %ymm0, (%rcx)
313 ; AVX2-FAST-NEXT: vzeroupper
314 ; AVX2-FAST-NEXT: retq
316 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride3_vf8:
317 ; AVX2-FAST-PERLANE: # %bb.0:
318 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0
319 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm1
320 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm2
321 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm3 = [2,5,2,5,2,5,2,5]
322 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm3, %ymm3
323 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
324 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm5 = [0,3,6,1,4,7,u,u]
325 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm5, %ymm4
326 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
327 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,0,3,6,0,0,3,6]
328 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,1,0,1]
329 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm4, %ymm4
330 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
331 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm6 = [1,4,7,2,5,u,u,u]
332 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm6, %ymm5
333 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
334 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
335 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm1 = [2,5,0,3,6,u,u,u]
336 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm1, %ymm0
337 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1,0,3,4,5,4,7]
338 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
339 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
340 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, (%rsi)
341 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, (%rdx)
342 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rcx)
343 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
344 ; AVX2-FAST-PERLANE-NEXT: retq
346 ; AVX512-LABEL: load_i32_stride3_vf8:
348 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
349 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
350 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [0,3,6,9,12,15,18,21]
351 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
352 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [1,4,7,10,13,16,19,22]
353 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
354 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,5,8,11,14,17,20,23]
355 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
356 ; AVX512-NEXT: vmovdqa %ymm2, (%rsi)
357 ; AVX512-NEXT: vmovdqa %ymm3, (%rdx)
358 ; AVX512-NEXT: vmovdqa %ymm4, (%rcx)
359 ; AVX512-NEXT: vzeroupper
361 %wide.vec = load <24 x i32>, ptr %in.vec, align 64
362 %strided.vec0 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
363 %strided.vec1 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
364 %strided.vec2 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
365 store <8 x i32> %strided.vec0, ptr %out.vec0, align 64
366 store <8 x i32> %strided.vec1, ptr %out.vec1, align 64
367 store <8 x i32> %strided.vec2, ptr %out.vec2, align 64
371 define void @load_i32_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
372 ; SSE-LABEL: load_i32_stride3_vf16:
374 ; SSE-NEXT: movaps 96(%rdi), %xmm6
375 ; SSE-NEXT: movaps 128(%rdi), %xmm1
376 ; SSE-NEXT: movaps 112(%rdi), %xmm13
377 ; SSE-NEXT: movaps 144(%rdi), %xmm11
378 ; SSE-NEXT: movaps 176(%rdi), %xmm10
379 ; SSE-NEXT: movaps 160(%rdi), %xmm9
380 ; SSE-NEXT: movaps (%rdi), %xmm7
381 ; SSE-NEXT: movaps 16(%rdi), %xmm8
382 ; SSE-NEXT: movaps 32(%rdi), %xmm3
383 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
384 ; SSE-NEXT: movaps 48(%rdi), %xmm15
385 ; SSE-NEXT: movaps 80(%rdi), %xmm14
386 ; SSE-NEXT: movaps 64(%rdi), %xmm2
387 ; SSE-NEXT: movaps %xmm2, %xmm0
388 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm14[1,0]
389 ; SSE-NEXT: movaps %xmm15, %xmm5
390 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm0[0,2]
391 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
392 ; SSE-NEXT: movaps %xmm8, %xmm0
393 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[1,0]
394 ; SSE-NEXT: movaps %xmm7, %xmm5
395 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm0[0,2]
396 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
397 ; SSE-NEXT: movaps %xmm9, %xmm0
398 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm10[1,0]
399 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
400 ; SSE-NEXT: movaps %xmm11, %xmm3
401 ; SSE-NEXT: movaps %xmm11, %xmm4
402 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
403 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
404 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
405 ; SSE-NEXT: movaps %xmm13, %xmm0
406 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
407 ; SSE-NEXT: movaps %xmm1, %xmm12
408 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
409 ; SSE-NEXT: movaps %xmm6, %xmm5
410 ; SSE-NEXT: movaps %xmm6, %xmm3
411 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm0[0,2]
412 ; SSE-NEXT: movaps %xmm15, %xmm11
413 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm2[0,0]
414 ; SSE-NEXT: movaps %xmm2, %xmm0
415 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm14[2,3]
416 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm0[0,2]
417 ; SSE-NEXT: movaps %xmm4, %xmm6
418 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm9[0,0]
419 ; SSE-NEXT: movaps %xmm9, %xmm0
420 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm10[2,3]
421 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[0,2]
422 ; SSE-NEXT: movaps %xmm3, %xmm1
423 ; SSE-NEXT: movaps %xmm3, %xmm10
424 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm13[0,0]
425 ; SSE-NEXT: movaps %xmm13, %xmm0
426 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm12[2,3]
427 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
428 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
429 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm8[0,0]
430 ; SSE-NEXT: movaps %xmm8, %xmm12
431 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
432 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[3,1],xmm4[2,3]
433 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm12[0,2]
434 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm2[1,1,1,1]
435 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[2,3,2,3]
436 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
437 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm14[0,3]
438 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[1,1,1,1]
439 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
440 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[0,3]
441 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[1,1,1,1]
442 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
443 ; SSE-NEXT: # xmm8 = mem[2,3,2,3]
444 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
445 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
446 ; SSE-NEXT: # xmm8 = xmm8[0,1],mem[0,3]
447 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[1,1,1,1]
448 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm10[2,3,2,3]
449 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
450 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
451 ; SSE-NEXT: # xmm4 = xmm4[0,1],mem[0,3]
452 ; SSE-NEXT: movaps %xmm5, 32(%rsi)
453 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
454 ; SSE-NEXT: movaps %xmm3, 48(%rsi)
455 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
456 ; SSE-NEXT: movaps %xmm3, (%rsi)
457 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
458 ; SSE-NEXT: movaps %xmm3, 16(%rsi)
459 ; SSE-NEXT: movaps %xmm1, 32(%rdx)
460 ; SSE-NEXT: movaps %xmm6, 48(%rdx)
461 ; SSE-NEXT: movaps %xmm7, (%rdx)
462 ; SSE-NEXT: movaps %xmm11, 16(%rdx)
463 ; SSE-NEXT: movaps %xmm4, 32(%rcx)
464 ; SSE-NEXT: movaps %xmm8, 48(%rcx)
465 ; SSE-NEXT: movaps %xmm0, (%rcx)
466 ; SSE-NEXT: movaps %xmm2, 16(%rcx)
469 ; AVX1-ONLY-LABEL: load_i32_stride3_vf16:
470 ; AVX1-ONLY: # %bb.0:
471 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm0
472 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm1
473 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm2
474 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm3
475 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm4
476 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm6
477 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7]
478 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm7
479 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,1],ymm4[1,3],ymm7[6,5],ymm4[5,7]
480 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,3],ymm7[0,2],ymm5[4,7],ymm7[4,6]
481 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm3[2,3,0,1]
482 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm3[1,0],ymm7[2,0],ymm3[5,4],ymm7[6,4]
483 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
484 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm8[6,7]
485 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
486 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm9
487 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,1],ymm1[1,3],ymm9[6,5],ymm1[5,7]
488 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,3],ymm9[0,2],ymm8[4,7],ymm9[4,6]
489 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm0[2,3,0,1]
490 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm0[1,0],ymm9[2,0],ymm0[5,4],ymm9[6,4]
491 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
492 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
493 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm3[2,0],ymm7[3,0],ymm3[6,4],ymm7[7,4]
494 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm7[0,0],ymm10[2,0],ymm7[4,4],ymm10[6,4]
495 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm11
496 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7]
497 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[1,2],ymm11[0,3],ymm12[5,6],ymm11[4,7]
498 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,2,3,1,4,6,7,5]
499 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3,4],ymm10[5,6,7]
500 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,0],ymm9[3,0],ymm0[6,4],ymm9[7,4]
501 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm9[0,0],ymm12[2,0],ymm9[4,4],ymm12[6,4]
502 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm13
503 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
504 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[1,2],ymm13[0,3],ymm14[5,6],ymm13[4,7]
505 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,2,3,1,4,6,7,5]
506 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm14[0,1,2,3,4],ymm12[5,6,7]
507 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
508 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm11[1,0],ymm6[2,0],ymm11[5,4],ymm6[6,4]
509 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm6[2,0],ymm4[0,3],ymm6[6,4],ymm4[4,7]
510 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm7[0,1],ymm3[0,3],ymm7[4,5],ymm3[4,7]
511 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5,6,7]
512 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
513 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm13[1,0],ymm2[2,0],ymm13[5,4],ymm2[6,4]
514 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,3],ymm2[6,4],ymm1[4,7]
515 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm9[0,1],ymm0[0,3],ymm9[4,5],ymm0[4,7]
516 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
517 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%rsi)
518 ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rsi)
519 ; AVX1-ONLY-NEXT: vmovaps %ymm12, 32(%rdx)
520 ; AVX1-ONLY-NEXT: vmovaps %ymm10, (%rdx)
521 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rcx)
522 ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rcx)
523 ; AVX1-ONLY-NEXT: vzeroupper
524 ; AVX1-ONLY-NEXT: retq
526 ; AVX2-SLOW-LABEL: load_i32_stride3_vf16:
527 ; AVX2-SLOW: # %bb.0:
528 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm1
529 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm0
530 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm2
531 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm3
532 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm4
533 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm5
534 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm6 = [2,5,2,5,2,5,2,5]
535 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm6, %ymm7
536 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
537 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm9 = [0,3,6,1,4,7,u,u]
538 ; AVX2-SLOW-NEXT: vpermps %ymm8, %ymm9, %ymm8
539 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
540 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm6, %ymm6
541 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm5[0],ymm1[1],ymm5[2,3],ymm1[4],ymm5[5,6],ymm1[7]
542 ; AVX2-SLOW-NEXT: vpermps %ymm8, %ymm9, %ymm8
543 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3,4,5],ymm6[6,7]
544 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm8 = [0,0,3,6,0,0,3,6]
545 ; AVX2-SLOW-NEXT: # ymm8 = mem[0,1,0,1]
546 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm8, %ymm9
547 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
548 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm11 = [1,4,7,2,5,u,u,u]
549 ; AVX2-SLOW-NEXT: vpermps %ymm10, %ymm11, %ymm10
550 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5,6,7]
551 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm8, %ymm8
552 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm5[0,1],ymm1[2],ymm5[3,4],ymm1[5],ymm5[6,7]
553 ; AVX2-SLOW-NEXT: vpermps %ymm10, %ymm11, %ymm10
554 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3,4],ymm8[5,6,7]
555 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
556 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm3 = [2,5,0,3,6,u,u,u]
557 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm3, %ymm2
558 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,0,3,4,5,4,7]
559 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
560 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
561 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7]
562 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm3, %ymm1
563 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,3,4,5,4,7]
564 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,3]
565 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
566 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 32(%rsi)
567 ; AVX2-SLOW-NEXT: vmovaps %ymm7, (%rsi)
568 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 32(%rdx)
569 ; AVX2-SLOW-NEXT: vmovaps %ymm9, (%rdx)
570 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rcx)
571 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rcx)
572 ; AVX2-SLOW-NEXT: vzeroupper
573 ; AVX2-SLOW-NEXT: retq
575 ; AVX2-FAST-LABEL: load_i32_stride3_vf16:
576 ; AVX2-FAST: # %bb.0:
577 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm0
578 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm1
579 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm2
580 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm3
581 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm4
582 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm5
583 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm6 = [2,5,2,5,2,5,2,5]
584 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm6, %ymm7
585 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
586 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm9 = [0,3,6,1,4,7,u,u]
587 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm9, %ymm8
588 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
589 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm6, %ymm6
590 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6],ymm0[7]
591 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm9, %ymm8
592 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3,4,5],ymm6[6,7]
593 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm8 = [0,0,3,6,0,0,3,6]
594 ; AVX2-FAST-NEXT: # ymm8 = mem[0,1,0,1]
595 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm8, %ymm9
596 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
597 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm11 = [1,4,7,2,5,u,u,u]
598 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm11, %ymm10
599 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5,6,7]
600 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm8, %ymm8
601 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
602 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm11, %ymm10
603 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3,4],ymm8[5,6,7]
604 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm10 = [0,1,0,3,0,1,4,7]
605 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm10, %ymm4
606 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
607 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm3 = [2,5,0,3,6,u,u,u]
608 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm3, %ymm2
609 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
610 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm10, %ymm1
611 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7]
612 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm3, %ymm0
613 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
614 ; AVX2-FAST-NEXT: vmovaps %ymm6, 32(%rsi)
615 ; AVX2-FAST-NEXT: vmovaps %ymm7, (%rsi)
616 ; AVX2-FAST-NEXT: vmovaps %ymm8, 32(%rdx)
617 ; AVX2-FAST-NEXT: vmovaps %ymm9, (%rdx)
618 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rcx)
619 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rcx)
620 ; AVX2-FAST-NEXT: vzeroupper
621 ; AVX2-FAST-NEXT: retq
623 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride3_vf16:
624 ; AVX2-FAST-PERLANE: # %bb.0:
625 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm1
626 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm0
627 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm2
628 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm3
629 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm4
630 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm5
631 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm6 = [2,5,2,5,2,5,2,5]
632 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm6, %ymm7
633 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
634 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm9 = [0,3,6,1,4,7,u,u]
635 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm8, %ymm9, %ymm8
636 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
637 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm6, %ymm6
638 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm5[0],ymm1[1],ymm5[2,3],ymm1[4],ymm5[5,6],ymm1[7]
639 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm8, %ymm9, %ymm8
640 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3,4,5],ymm6[6,7]
641 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm8 = [0,0,3,6,0,0,3,6]
642 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0,1,0,1]
643 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm8, %ymm9
644 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
645 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm11 = [1,4,7,2,5,u,u,u]
646 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm10, %ymm11, %ymm10
647 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5,6,7]
648 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm8, %ymm8
649 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm5[0,1],ymm1[2],ymm5[3,4],ymm1[5],ymm5[6,7]
650 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm10, %ymm11, %ymm10
651 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3,4],ymm8[5,6,7]
652 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
653 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm3 = [2,5,0,3,6,u,u,u]
654 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm3, %ymm2
655 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,0,3,4,5,4,7]
656 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
657 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
658 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7]
659 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm3, %ymm1
660 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,0,3,4,5,4,7]
661 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,3]
662 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
663 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 32(%rsi)
664 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, (%rsi)
665 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 32(%rdx)
666 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, (%rdx)
667 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rcx)
668 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rcx)
669 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
670 ; AVX2-FAST-PERLANE-NEXT: retq
672 ; AVX512-LABEL: load_i32_stride3_vf16:
674 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
675 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
676 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2
677 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,3,6,9,12,15,18,21,24,27,30,u,u,u,u,u]
678 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
679 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,17,20,23,26,29]
680 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm4
681 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [17,20,23,26,29,0,3,6,9,12,15,u,u,u,u,u]
682 ; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm3
683 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,5,6,7,8,9,10,18,21,24,27,30]
684 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm5
685 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [2,5,8,11,14,17,20,23,26,29,u,u,u,u,u,u]
686 ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
687 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,6,7,8,9,16,19,22,25,28,31]
688 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm0
689 ; AVX512-NEXT: vmovdqa64 %zmm4, (%rsi)
690 ; AVX512-NEXT: vmovdqa64 %zmm5, (%rdx)
691 ; AVX512-NEXT: vmovdqa64 %zmm0, (%rcx)
692 ; AVX512-NEXT: vzeroupper
694 %wide.vec = load <48 x i32>, ptr %in.vec, align 64
695 %strided.vec0 = shufflevector <48 x i32> %wide.vec, <48 x i32> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
696 %strided.vec1 = shufflevector <48 x i32> %wide.vec, <48 x i32> poison, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
697 %strided.vec2 = shufflevector <48 x i32> %wide.vec, <48 x i32> poison, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
698 store <16 x i32> %strided.vec0, ptr %out.vec0, align 64
699 store <16 x i32> %strided.vec1, ptr %out.vec1, align 64
700 store <16 x i32> %strided.vec2, ptr %out.vec2, align 64
704 define void @load_i32_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
705 ; SSE-LABEL: load_i32_stride3_vf32:
707 ; SSE-NEXT: subq $392, %rsp # imm = 0x188
708 ; SSE-NEXT: movaps 192(%rdi), %xmm4
709 ; SSE-NEXT: movaps 224(%rdi), %xmm3
710 ; SSE-NEXT: movaps 208(%rdi), %xmm14
711 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
712 ; SSE-NEXT: movaps 240(%rdi), %xmm7
713 ; SSE-NEXT: movaps 272(%rdi), %xmm6
714 ; SSE-NEXT: movaps 256(%rdi), %xmm9
715 ; SSE-NEXT: movaps (%rdi), %xmm13
716 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
717 ; SSE-NEXT: movaps 16(%rdi), %xmm8
718 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
719 ; SSE-NEXT: movaps 32(%rdi), %xmm11
720 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
721 ; SSE-NEXT: movaps 48(%rdi), %xmm2
722 ; SSE-NEXT: movaps 80(%rdi), %xmm1
723 ; SSE-NEXT: movaps 64(%rdi), %xmm5
724 ; SSE-NEXT: movaps %xmm5, %xmm0
725 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
726 ; SSE-NEXT: movaps %xmm1, %xmm12
727 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
728 ; SSE-NEXT: movaps %xmm2, %xmm1
729 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
730 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
731 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
732 ; SSE-NEXT: movaps %xmm9, %xmm0
733 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[1,0]
734 ; SSE-NEXT: movaps %xmm6, %xmm10
735 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
736 ; SSE-NEXT: movaps %xmm7, %xmm1
737 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
738 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
739 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
740 ; SSE-NEXT: movaps %xmm8, %xmm0
741 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm11[1,0]
742 ; SSE-NEXT: movaps %xmm13, %xmm1
743 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
744 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
745 ; SSE-NEXT: movaps %xmm14, %xmm0
746 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[1,0]
747 ; SSE-NEXT: movaps %xmm3, %xmm13
748 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
749 ; SSE-NEXT: movaps %xmm4, %xmm1
750 ; SSE-NEXT: movaps %xmm4, %xmm11
751 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
752 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
753 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
754 ; SSE-NEXT: movaps 176(%rdi), %xmm1
755 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
756 ; SSE-NEXT: movaps 160(%rdi), %xmm0
757 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
758 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
759 ; SSE-NEXT: movaps 144(%rdi), %xmm3
760 ; SSE-NEXT: movaps %xmm3, %xmm1
761 ; SSE-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill
762 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
763 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
764 ; SSE-NEXT: movaps 368(%rdi), %xmm1
765 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
766 ; SSE-NEXT: movaps 352(%rdi), %xmm0
767 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
768 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
769 ; SSE-NEXT: movaps 336(%rdi), %xmm4
770 ; SSE-NEXT: movaps %xmm4, %xmm1
771 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
772 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
773 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
774 ; SSE-NEXT: movaps 128(%rdi), %xmm1
775 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
776 ; SSE-NEXT: movaps 112(%rdi), %xmm15
777 ; SSE-NEXT: movaps %xmm15, %xmm0
778 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
779 ; SSE-NEXT: movaps 96(%rdi), %xmm1
780 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
781 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
782 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
783 ; SSE-NEXT: movaps 320(%rdi), %xmm1
784 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
785 ; SSE-NEXT: movaps 304(%rdi), %xmm6
786 ; SSE-NEXT: movaps %xmm6, %xmm0
787 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
788 ; SSE-NEXT: movaps 288(%rdi), %xmm8
789 ; SSE-NEXT: movaps %xmm8, %xmm1
790 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
791 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
792 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
793 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
794 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm5[0,0]
795 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,1],xmm12[2,3]
796 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[0,2]
797 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
798 ; SSE-NEXT: movaps %xmm7, %xmm14
799 ; SSE-NEXT: movaps %xmm9, %xmm0
800 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
801 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,0],xmm9[0,0]
802 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm10[2,3]
803 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,2],xmm0[0,2]
804 ; SSE-NEXT: movaps %xmm11, %xmm10
805 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
806 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,0],xmm0[0,0]
807 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm13[2,3]
808 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[0,2]
809 ; SSE-NEXT: movaps %xmm3, %xmm9
810 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
811 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm0[0,0]
812 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
813 ; SSE-NEXT: # xmm0 = xmm0[3,1],mem[2,3]
814 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm0[0,2]
815 ; SSE-NEXT: movaps %xmm4, %xmm7
816 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
817 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm0[0,0]
818 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
819 ; SSE-NEXT: # xmm0 = xmm0[3,1],mem[2,3]
820 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm0[0,2]
821 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
822 ; SSE-NEXT: movaps %xmm4, %xmm11
823 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm15[0,0]
824 ; SSE-NEXT: movaps %xmm15, %xmm0
825 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
826 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm12[2,3]
827 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm0[0,2]
828 ; SSE-NEXT: movaps %xmm8, %xmm3
829 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
830 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm6[0,0]
831 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
832 ; SSE-NEXT: # xmm6 = xmm6[3,1],mem[2,3]
833 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,2]
834 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
835 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
836 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
837 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[0,0]
838 ; SSE-NEXT: movaps %xmm0, %xmm13
839 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
840 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[3,1],xmm1[2,3]
841 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm13[0,2]
842 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
843 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
844 ; SSE-NEXT: # xmm13 = mem[1,1,1,1]
845 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
846 ; SSE-NEXT: # xmm8 = mem[2,3,2,3]
847 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm13[0],xmm8[1],xmm13[1]
848 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
849 ; SSE-NEXT: # xmm8 = xmm8[0,1],mem[0,3]
850 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm0[1,1,1,1]
851 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1]
852 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm1[0,3]
853 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
854 ; SSE-NEXT: # xmm13 = mem[1,1,1,1]
855 ; SSE-NEXT: pshufd $238, (%rsp), %xmm6 # 16-byte Folded Reload
856 ; SSE-NEXT: # xmm6 = mem[2,3,2,3]
857 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1]
858 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
859 ; SSE-NEXT: # xmm6 = xmm6[0,1],mem[0,3]
860 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm15[1,1,1,1]
861 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
862 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1]
863 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm12[0,3]
864 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
865 ; SSE-NEXT: # xmm15 = mem[1,1,1,1]
866 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
867 ; SSE-NEXT: # xmm13 = mem[2,3,2,3]
868 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
869 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
870 ; SSE-NEXT: # xmm13 = xmm13[0,1],mem[0,3]
871 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
872 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
873 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
874 ; SSE-NEXT: # xmm15 = mem[2,3,2,3]
875 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
876 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
877 ; SSE-NEXT: # xmm15 = xmm15[0,1],mem[0,3]
878 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
879 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
880 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
881 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
882 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
883 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
884 ; SSE-NEXT: # xmm1 = xmm1[0,1],mem[0,3]
885 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
886 ; SSE-NEXT: # xmm2 = mem[1,1,1,1]
887 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
888 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
889 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
890 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
891 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,3]
892 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
893 ; SSE-NEXT: movaps %xmm2, 96(%rsi)
894 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
895 ; SSE-NEXT: movaps %xmm2, 32(%rsi)
896 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
897 ; SSE-NEXT: movaps %xmm2, 112(%rsi)
898 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
899 ; SSE-NEXT: movaps %xmm2, 48(%rsi)
900 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
901 ; SSE-NEXT: movaps %xmm2, 64(%rsi)
902 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
903 ; SSE-NEXT: movaps %xmm2, (%rsi)
904 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
905 ; SSE-NEXT: movaps %xmm2, 80(%rsi)
906 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
907 ; SSE-NEXT: movaps %xmm2, 16(%rsi)
908 ; SSE-NEXT: movaps %xmm3, 96(%rdx)
909 ; SSE-NEXT: movaps %xmm11, 32(%rdx)
910 ; SSE-NEXT: movaps %xmm7, 112(%rdx)
911 ; SSE-NEXT: movaps %xmm9, 48(%rdx)
912 ; SSE-NEXT: movaps %xmm10, 64(%rdx)
913 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
914 ; SSE-NEXT: movaps %xmm2, (%rdx)
915 ; SSE-NEXT: movaps %xmm14, 80(%rdx)
916 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
917 ; SSE-NEXT: movaps %xmm2, 16(%rdx)
918 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
919 ; SSE-NEXT: movaps %xmm1, 112(%rcx)
920 ; SSE-NEXT: movaps %xmm15, 64(%rcx)
921 ; SSE-NEXT: movaps %xmm13, 80(%rcx)
922 ; SSE-NEXT: movaps %xmm4, 32(%rcx)
923 ; SSE-NEXT: movaps %xmm6, 48(%rcx)
924 ; SSE-NEXT: movaps %xmm5, (%rcx)
925 ; SSE-NEXT: movaps %xmm8, 16(%rcx)
926 ; SSE-NEXT: addq $392, %rsp # imm = 0x188
929 ; AVX1-ONLY-LABEL: load_i32_stride3_vf32:
930 ; AVX1-ONLY: # %bb.0:
931 ; AVX1-ONLY-NEXT: subq $392, %rsp # imm = 0x188
932 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %ymm2
933 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm7
934 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm3
935 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
936 ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %ymm4
937 ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %ymm5
938 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %ymm6
939 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
940 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm10
941 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm9
942 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm0
943 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
944 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm9[1],ymm0[2,3],ymm9[4],ymm0[5,6],ymm9[7]
945 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm1
946 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm9[1,3],ymm1[6,5],ymm9[5,7]
947 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm1[0,2],ymm0[4,7],ymm1[4,6]
948 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm10[2,3,0,1]
949 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[1,0],ymm11[2,0],ymm10[5,4],ymm11[6,4]
950 ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
951 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
952 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
953 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
954 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6],ymm5[7]
955 ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm1
956 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm5[1,3],ymm1[6,5],ymm5[5,7]
957 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm1[0,2],ymm0[4,7],ymm1[4,6]
958 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm4[2,3,0,1]
959 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[1,0],ymm14[2,0],ymm4[5,4],ymm14[6,4]
960 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
961 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
962 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
963 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
964 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
965 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm7[1],ymm3[2,3],ymm7[4],ymm3[5,6],ymm7[7]
966 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm1
967 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm7[1,3],ymm1[6,5],ymm7[5,7]
968 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm1[0,2],ymm0[4,7],ymm1[4,6]
969 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
970 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm2[2,3,0,1]
971 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,0],ymm12[2,0],ymm2[5,4],ymm12[6,4]
972 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
973 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
974 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
975 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm15
976 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm0
977 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm15[1,3],ymm0[6,5],ymm15[5,7]
978 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm2
979 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm15[1],ymm2[2,3],ymm15[4],ymm2[5,6],ymm15[7]
980 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
981 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm1[0,3],ymm0[0,2],ymm1[4,7],ymm0[4,6]
982 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm7
983 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3,0,1]
984 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm7[1,0],ymm0[2,0],ymm7[5,4],ymm0[6,4]
985 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
986 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3,4,5],ymm8[6,7]
987 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
988 ; AVX1-ONLY-NEXT: vmovups %ymm11, (%rsp) # 32-byte Spill
989 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm10[2,0],ymm11[3,0],ymm10[6,4],ymm11[7,4]
990 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm11[0,0],ymm8[2,0],ymm11[4,4],ymm8[6,4]
991 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm13
992 ; AVX1-ONLY-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm6 # 32-byte Folded Reload
993 ; AVX1-ONLY-NEXT: # ymm6 = mem[0,1],ymm9[2],mem[3,4],ymm9[5],mem[6,7]
994 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,2],ymm13[0,3],ymm6[5,6],ymm13[4,7]
995 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,2,3,1,4,6,7,5]
996 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3,4],ymm8[5,6,7]
997 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
998 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm4[2,0],ymm14[3,0],ymm4[6,4],ymm14[7,4]
999 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm14[0,0],ymm6[2,0],ymm14[4,4],ymm6[6,4]
1000 ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm8
1001 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1002 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7]
1003 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,2],ymm8[0,3],ymm10[5,6],ymm8[4,7]
1004 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,2,3,1,4,6,7,5]
1005 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3,4],ymm6[5,6,7]
1006 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1007 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm0[3,0],ymm7[6,4],ymm0[7,4]
1008 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm0[0,0],ymm6[2,0],ymm0[4,4],ymm6[6,4]
1009 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm11
1010 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1],ymm15[2],ymm2[3,4],ymm15[5],ymm2[6,7]
1011 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[1,2],ymm11[0,3],ymm4[5,6],ymm11[4,7]
1012 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2,3,1,4,6,7,5]
1013 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm6[5,6,7]
1014 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1015 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1016 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm1[2,0],ymm12[3,0],ymm1[6,4],ymm12[7,4]
1017 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm12[0,0],ymm4[2,0],ymm12[4,4],ymm4[6,4]
1018 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm10
1019 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1020 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1021 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
1022 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[1,2],ymm10[0,3],ymm6[5,6],ymm10[4,7]
1023 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,2,3,1,4,6,7,5]
1024 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3,4],ymm4[5,6,7]
1025 ; AVX1-ONLY-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm6 # 32-byte Folded Reload
1026 ; AVX1-ONLY-NEXT: # ymm6 = ymm9[0,1],mem[2],ymm9[3,4],mem[5],ymm9[6,7]
1027 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm13[1,0],ymm6[2,0],ymm13[5,4],ymm6[6,4]
1028 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,0],ymm9[0,3],ymm6[6,4],ymm9[4,7]
1029 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm9 # 32-byte Reload
1030 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
1031 ; AVX1-ONLY-NEXT: # ymm9 = ymm9[0,1],mem[0,3],ymm9[4,5],mem[4,7]
1032 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm9[5,6,7]
1033 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7]
1034 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[1,0],ymm9[2,0],ymm8[5,4],ymm9[6,4]
1035 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm8[2,0],ymm5[0,3],ymm8[6,4],ymm5[4,7]
1036 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm8 # 32-byte Folded Reload
1037 ; AVX1-ONLY-NEXT: # ymm8 = ymm14[0,1],mem[0,3],ymm14[4,5],mem[4,7]
1038 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm8[5,6,7]
1039 ; AVX1-ONLY-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
1040 ; AVX1-ONLY-NEXT: # ymm3 = ymm15[0,1],mem[2],ymm15[3,4],mem[5],ymm15[6,7]
1041 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm11[1,0],ymm3[2,0],ymm11[5,4],ymm3[6,4]
1042 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0],ymm15[0,3],ymm3[6,4],ymm15[4,7]
1043 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm7[0,3],ymm0[4,5],ymm7[4,7]
1044 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
1045 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
1046 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[1,0],ymm1[2,0],ymm10[5,4],ymm1[6,4]
1047 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[0,3],ymm1[6,4],ymm2[4,7]
1048 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm2 # 32-byte Folded Reload
1049 ; AVX1-ONLY-NEXT: # ymm2 = ymm12[0,1],mem[0,3],ymm12[4,5],mem[4,7]
1050 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
1051 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1052 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rsi)
1053 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1054 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 64(%rsi)
1055 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1056 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 96(%rsi)
1057 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1058 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rsi)
1059 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rdx)
1060 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1061 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rdx)
1062 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1063 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 96(%rdx)
1064 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1065 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rdx)
1066 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rcx)
1067 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx)
1068 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 96(%rcx)
1069 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 32(%rcx)
1070 ; AVX1-ONLY-NEXT: addq $392, %rsp # imm = 0x188
1071 ; AVX1-ONLY-NEXT: vzeroupper
1072 ; AVX1-ONLY-NEXT: retq
1074 ; AVX2-SLOW-LABEL: load_i32_stride3_vf32:
1075 ; AVX2-SLOW: # %bb.0:
1076 ; AVX2-SLOW-NEXT: subq $136, %rsp
1077 ; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %ymm0
1078 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1079 ; AVX2-SLOW-NEXT: vmovaps 320(%rdi), %ymm4
1080 ; AVX2-SLOW-NEXT: vmovaps 288(%rdi), %ymm7
1081 ; AVX2-SLOW-NEXT: vmovaps 352(%rdi), %ymm5
1082 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm10
1083 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm12
1084 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm6
1085 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm3
1086 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1087 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm13
1088 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm11
1089 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm1 = [2,5,2,5,2,5,2,5]
1090 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm1, %ymm8
1091 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm13[0],ymm10[1],ymm13[2,3],ymm10[4],ymm13[5,6],ymm10[7]
1092 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm14 = [0,3,6,1,4,7,u,u]
1093 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm14, %ymm9
1094 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm8[6,7]
1095 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1096 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm1, %ymm8
1097 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm7[0],ymm4[1],ymm7[2,3],ymm4[4],ymm7[5,6],ymm4[7]
1098 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm14, %ymm9
1099 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm8[6,7]
1100 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1101 ; AVX2-SLOW-NEXT: vpermps %ymm3, %ymm1, %ymm8
1102 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm12[0],ymm6[1],ymm12[2,3],ymm6[4],ymm12[5,6],ymm6[7]
1103 ; AVX2-SLOW-NEXT: vmovaps %ymm12, %ymm3
1104 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm14, %ymm9
1105 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm8[6,7]
1106 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1107 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %ymm12
1108 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm12[0],ymm0[1],ymm12[2,3],ymm0[4],ymm12[5,6],ymm0[7]
1109 ; AVX2-SLOW-NEXT: vpermps %ymm8, %ymm14, %ymm8
1110 ; AVX2-SLOW-NEXT: vmovaps 256(%rdi), %ymm15
1111 ; AVX2-SLOW-NEXT: vpermps %ymm15, %ymm1, %ymm1
1112 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm1[6,7]
1113 ; AVX2-SLOW-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
1114 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm10[2],ymm13[3,4],ymm10[5],ymm13[6,7]
1115 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm2 = [1,4,7,2,5,u,u,u]
1116 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm2, %ymm1
1117 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,0,3,6,0,0,3,6]
1118 ; AVX2-SLOW-NEXT: # ymm0 = mem[0,1,0,1]
1119 ; AVX2-SLOW-NEXT: vpermps %ymm11, %ymm0, %ymm8
1120 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm8[5,6,7]
1121 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1122 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1],ymm4[2],ymm7[3,4],ymm4[5],ymm7[6,7]
1123 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm2, %ymm1
1124 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm0, %ymm9
1125 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm9[5,6,7]
1126 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1127 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
1128 ; AVX2-SLOW-NEXT: vmovaps %ymm6, %ymm8
1129 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm2, %ymm9
1130 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1131 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm0, %ymm14
1132 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm14[5,6,7]
1133 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1134 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm12[0,1],ymm1[2],ymm12[3,4],ymm1[5],ymm12[6,7]
1135 ; AVX2-SLOW-NEXT: vpermps %ymm14, %ymm2, %ymm2
1136 ; AVX2-SLOW-NEXT: vpermps %ymm15, %ymm0, %ymm0
1137 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
1138 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm13[2],ymm10[3,4],ymm13[5],ymm10[6,7]
1139 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm10 = [2,5,0,3,6,u,u,u]
1140 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm10, %ymm2
1141 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,0,3,4,5,4,7]
1142 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,0,3]
1143 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm11[5,6,7]
1144 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2],ymm4[3,4],ymm7[5],ymm4[6,7]
1145 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm10, %ymm4
1146 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1,0,3,4,5,4,7]
1147 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
1148 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5,6,7]
1149 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm8[0,1],ymm3[2],ymm8[3,4],ymm3[5],ymm8[6,7]
1150 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm10, %ymm5
1151 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm6[0,1,0,3,4,5,4,7]
1152 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
1153 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3,4],ymm3[5,6,7]
1154 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1],ymm12[2],ymm1[3,4],ymm12[5],ymm1[6,7]
1155 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm10, %ymm5
1156 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm15[0,1,0,3,4,5,4,7]
1157 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
1158 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
1159 ; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm6 # 32-byte Reload
1160 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 64(%rsi)
1161 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1162 ; AVX2-SLOW-NEXT: vmovaps %ymm6, (%rsi)
1163 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1164 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 96(%rsi)
1165 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1166 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 32(%rsi)
1167 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rdx)
1168 ; AVX2-SLOW-NEXT: vmovaps %ymm9, (%rdx)
1169 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1170 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 96(%rdx)
1171 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1172 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rdx)
1173 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 64(%rcx)
1174 ; AVX2-SLOW-NEXT: vmovaps %ymm3, (%rcx)
1175 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 96(%rcx)
1176 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 32(%rcx)
1177 ; AVX2-SLOW-NEXT: addq $136, %rsp
1178 ; AVX2-SLOW-NEXT: vzeroupper
1179 ; AVX2-SLOW-NEXT: retq
1181 ; AVX2-FAST-LABEL: load_i32_stride3_vf32:
1182 ; AVX2-FAST: # %bb.0:
1183 ; AVX2-FAST-NEXT: subq $104, %rsp
1184 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %ymm1
1185 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1186 ; AVX2-FAST-NEXT: vmovaps 320(%rdi), %ymm3
1187 ; AVX2-FAST-NEXT: vmovaps 288(%rdi), %ymm6
1188 ; AVX2-FAST-NEXT: vmovaps 352(%rdi), %ymm4
1189 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm10
1190 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm2
1191 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm15
1192 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm5
1193 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm13
1194 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm12
1195 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm8 = [2,5,2,5,2,5,2,5]
1196 ; AVX2-FAST-NEXT: vpermps %ymm12, %ymm8, %ymm0
1197 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm13[0],ymm10[1],ymm13[2,3],ymm10[4],ymm13[5,6],ymm10[7]
1198 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm14 = [0,3,6,1,4,7,u,u]
1199 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm14, %ymm9
1200 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4,5],ymm0[6,7]
1201 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1202 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm8, %ymm0
1203 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm6[0],ymm3[1],ymm6[2,3],ymm3[4],ymm6[5,6],ymm3[7]
1204 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm14, %ymm9
1205 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4,5],ymm0[6,7]
1206 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1207 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm8, %ymm0
1208 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm9 = ymm2[0],ymm15[1],ymm2[2,3],ymm15[4],ymm2[5,6],ymm15[7]
1209 ; AVX2-FAST-NEXT: vmovaps %ymm2, %ymm7
1210 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm14, %ymm9
1211 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4,5],ymm0[6,7]
1212 ; AVX2-FAST-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
1213 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %ymm11
1214 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0],ymm1[1],ymm11[2,3],ymm1[4],ymm11[5,6],ymm1[7]
1215 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm14, %ymm9
1216 ; AVX2-FAST-NEXT: vmovaps 256(%rdi), %ymm2
1217 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm8, %ymm8
1218 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4,5],ymm8[6,7]
1219 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1220 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm10[2],ymm13[3,4],ymm10[5],ymm13[6,7]
1221 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [1,4,7,2,5,u,u,u]
1222 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm1, %ymm8
1223 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,0,3,6,0,0,3,6]
1224 ; AVX2-FAST-NEXT: # ymm0 = mem[0,1,0,1]
1225 ; AVX2-FAST-NEXT: vpermps %ymm12, %ymm0, %ymm9
1226 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6,7]
1227 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1228 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7]
1229 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm1, %ymm8
1230 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm0, %ymm9
1231 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6,7]
1232 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1233 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1],ymm15[2],ymm7[3,4],ymm15[5],ymm7[6,7]
1234 ; AVX2-FAST-NEXT: vmovaps %ymm7, %ymm9
1235 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm1, %ymm8
1236 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm0, %ymm14
1237 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm14[5,6,7]
1238 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
1239 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm14 = ymm11[0,1],ymm7[2],ymm11[3,4],ymm7[5],ymm11[6,7]
1240 ; AVX2-FAST-NEXT: vpermps %ymm14, %ymm1, %ymm1
1241 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm0, %ymm0
1242 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
1243 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1],ymm13[2],ymm10[3,4],ymm13[5],ymm10[6,7]
1244 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm10 = [0,1,0,3,0,1,4,7]
1245 ; AVX2-FAST-NEXT: vpermps %ymm12, %ymm10, %ymm12
1246 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm13 = [2,5,0,3,6,u,u,u]
1247 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm13, %ymm1
1248 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm12[5,6,7]
1249 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
1250 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm10, %ymm6
1251 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm13, %ymm3
1252 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm6[5,6,7]
1253 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm15[0,1],ymm9[2],ymm15[3,4],ymm9[5],ymm15[6,7]
1254 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm10, %ymm5
1255 ; AVX2-FAST-NEXT: vpermps %ymm4, %ymm13, %ymm4
1256 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5,6,7]
1257 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm10, %ymm2
1258 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm11[2],ymm7[3,4],ymm11[5],ymm7[6,7]
1259 ; AVX2-FAST-NEXT: vpermps %ymm5, %ymm13, %ymm5
1260 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5,6,7]
1261 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1262 ; AVX2-FAST-NEXT: vmovaps %ymm5, 64(%rsi)
1263 ; AVX2-FAST-NEXT: vmovups (%rsp), %ymm5 # 32-byte Reload
1264 ; AVX2-FAST-NEXT: vmovaps %ymm5, (%rsi)
1265 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1266 ; AVX2-FAST-NEXT: vmovaps %ymm5, 96(%rsi)
1267 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1268 ; AVX2-FAST-NEXT: vmovaps %ymm5, 32(%rsi)
1269 ; AVX2-FAST-NEXT: vmovaps %ymm0, 64(%rdx)
1270 ; AVX2-FAST-NEXT: vmovaps %ymm8, (%rdx)
1271 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1272 ; AVX2-FAST-NEXT: vmovaps %ymm0, 96(%rdx)
1273 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1274 ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%rdx)
1275 ; AVX2-FAST-NEXT: vmovaps %ymm2, 64(%rcx)
1276 ; AVX2-FAST-NEXT: vmovaps %ymm4, (%rcx)
1277 ; AVX2-FAST-NEXT: vmovaps %ymm3, 96(%rcx)
1278 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rcx)
1279 ; AVX2-FAST-NEXT: addq $104, %rsp
1280 ; AVX2-FAST-NEXT: vzeroupper
1281 ; AVX2-FAST-NEXT: retq
1283 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride3_vf32:
1284 ; AVX2-FAST-PERLANE: # %bb.0:
1285 ; AVX2-FAST-PERLANE-NEXT: subq $136, %rsp
1286 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %ymm0
1287 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1288 ; AVX2-FAST-PERLANE-NEXT: vmovaps 320(%rdi), %ymm4
1289 ; AVX2-FAST-PERLANE-NEXT: vmovaps 288(%rdi), %ymm7
1290 ; AVX2-FAST-PERLANE-NEXT: vmovaps 352(%rdi), %ymm5
1291 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm10
1292 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm12
1293 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm6
1294 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm3
1295 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1296 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm13
1297 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm11
1298 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm1 = [2,5,2,5,2,5,2,5]
1299 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm1, %ymm8
1300 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm13[0],ymm10[1],ymm13[2,3],ymm10[4],ymm13[5,6],ymm10[7]
1301 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm14 = [0,3,6,1,4,7,u,u]
1302 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm14, %ymm9
1303 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm8[6,7]
1304 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1305 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm1, %ymm8
1306 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm7[0],ymm4[1],ymm7[2,3],ymm4[4],ymm7[5,6],ymm4[7]
1307 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm14, %ymm9
1308 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm8[6,7]
1309 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1310 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm3, %ymm1, %ymm8
1311 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm12[0],ymm6[1],ymm12[2,3],ymm6[4],ymm12[5,6],ymm6[7]
1312 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm12, %ymm3
1313 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm14, %ymm9
1314 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm8[6,7]
1315 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1316 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %ymm12
1317 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm12[0],ymm0[1],ymm12[2,3],ymm0[4],ymm12[5,6],ymm0[7]
1318 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm8, %ymm14, %ymm8
1319 ; AVX2-FAST-PERLANE-NEXT: vmovaps 256(%rdi), %ymm15
1320 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm15, %ymm1, %ymm1
1321 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm1[6,7]
1322 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
1323 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm10[2],ymm13[3,4],ymm10[5],ymm13[6,7]
1324 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm2 = [1,4,7,2,5,u,u,u]
1325 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm2, %ymm1
1326 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,0,3,6,0,0,3,6]
1327 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[0,1,0,1]
1328 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm11, %ymm0, %ymm8
1329 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm8[5,6,7]
1330 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1331 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1],ymm4[2],ymm7[3,4],ymm4[5],ymm7[6,7]
1332 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm2, %ymm1
1333 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm0, %ymm9
1334 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm9[5,6,7]
1335 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1336 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
1337 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, %ymm8
1338 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm2, %ymm9
1339 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1340 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm0, %ymm14
1341 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm14[5,6,7]
1342 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1343 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm12[0,1],ymm1[2],ymm12[3,4],ymm1[5],ymm12[6,7]
1344 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm14, %ymm2, %ymm2
1345 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm15, %ymm0, %ymm0
1346 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
1347 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm13[2],ymm10[3,4],ymm13[5],ymm10[6,7]
1348 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm10 = [2,5,0,3,6,u,u,u]
1349 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm10, %ymm2
1350 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,0,3,4,5,4,7]
1351 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,0,3]
1352 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm11[5,6,7]
1353 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2],ymm4[3,4],ymm7[5],ymm4[6,7]
1354 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm10, %ymm4
1355 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1,0,3,4,5,4,7]
1356 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
1357 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5,6,7]
1358 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm8[0,1],ymm3[2],ymm8[3,4],ymm3[5],ymm8[6,7]
1359 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm10, %ymm5
1360 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm6[0,1,0,3,4,5,4,7]
1361 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
1362 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3,4],ymm3[5,6,7]
1363 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1],ymm12[2],ymm1[3,4],ymm12[5],ymm1[6,7]
1364 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm10, %ymm5
1365 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm15[0,1,0,3,4,5,4,7]
1366 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
1367 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
1368 ; AVX2-FAST-PERLANE-NEXT: vmovups (%rsp), %ymm6 # 32-byte Reload
1369 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 64(%rsi)
1370 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1371 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, (%rsi)
1372 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1373 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 96(%rsi)
1374 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1375 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 32(%rsi)
1376 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rdx)
1377 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, (%rdx)
1378 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1379 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 96(%rdx)
1380 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1381 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rdx)
1382 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 64(%rcx)
1383 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, (%rcx)
1384 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 96(%rcx)
1385 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 32(%rcx)
1386 ; AVX2-FAST-PERLANE-NEXT: addq $136, %rsp
1387 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
1388 ; AVX2-FAST-PERLANE-NEXT: retq
1390 ; AVX512-LABEL: load_i32_stride3_vf32:
1392 ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm0
1393 ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm1
1394 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm2
1395 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3
1396 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm4
1397 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm5
1398 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,3,6,9,12,15,18,21,24,27,30,u,u,u,u,u]
1399 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm7
1400 ; AVX512-NEXT: vpermt2d %zmm1, %zmm6, %zmm7
1401 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,6,7,8,9,10,17,20,23,26,29]
1402 ; AVX512-NEXT: vpermt2d %zmm0, %zmm8, %zmm7
1403 ; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
1404 ; AVX512-NEXT: vpermt2d %zmm4, %zmm8, %zmm6
1405 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [17,20,23,26,29,0,3,6,9,12,15,u,u,u,u,u]
1406 ; AVX512-NEXT: vmovdqa64 %zmm1, %zmm9
1407 ; AVX512-NEXT: vpermt2d %zmm5, %zmm8, %zmm9
1408 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,5,6,7,8,9,10,18,21,24,27,30]
1409 ; AVX512-NEXT: vpermt2d %zmm0, %zmm10, %zmm9
1410 ; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm8
1411 ; AVX512-NEXT: vpermt2d %zmm4, %zmm10, %zmm8
1412 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = [2,5,8,11,14,17,20,23,26,29,u,u,u,u,u,u]
1413 ; AVX512-NEXT: vpermt2d %zmm1, %zmm10, %zmm5
1414 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,16,19,22,25,28,31]
1415 ; AVX512-NEXT: vpermt2d %zmm0, %zmm1, %zmm5
1416 ; AVX512-NEXT: vpermt2d %zmm3, %zmm10, %zmm2
1417 ; AVX512-NEXT: vpermt2d %zmm4, %zmm1, %zmm2
1418 ; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rsi)
1419 ; AVX512-NEXT: vmovdqa64 %zmm6, (%rsi)
1420 ; AVX512-NEXT: vmovdqa64 %zmm9, 64(%rdx)
1421 ; AVX512-NEXT: vmovdqa64 %zmm8, (%rdx)
1422 ; AVX512-NEXT: vmovdqa64 %zmm5, 64(%rcx)
1423 ; AVX512-NEXT: vmovdqa64 %zmm2, (%rcx)
1424 ; AVX512-NEXT: vzeroupper
1426 %wide.vec = load <96 x i32>, ptr %in.vec, align 64
1427 %strided.vec0 = shufflevector <96 x i32> %wide.vec, <96 x i32> poison, <32 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93>
1428 %strided.vec1 = shufflevector <96 x i32> %wide.vec, <96 x i32> poison, <32 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94>
1429 %strided.vec2 = shufflevector <96 x i32> %wide.vec, <96 x i32> poison, <32 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95>
1430 store <32 x i32> %strided.vec0, ptr %out.vec0, align 64
1431 store <32 x i32> %strided.vec1, ptr %out.vec1, align 64
1432 store <32 x i32> %strided.vec2, ptr %out.vec2, align 64
1436 define void @load_i32_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
1437 ; SSE-LABEL: load_i32_stride3_vf64:
1439 ; SSE-NEXT: subq $1112, %rsp # imm = 0x458
1440 ; SSE-NEXT: movaps 624(%rdi), %xmm2
1441 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1442 ; SSE-NEXT: movaps 656(%rdi), %xmm4
1443 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1444 ; SSE-NEXT: movaps 640(%rdi), %xmm10
1445 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1446 ; SSE-NEXT: movaps 432(%rdi), %xmm6
1447 ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1448 ; SSE-NEXT: movaps 464(%rdi), %xmm5
1449 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1450 ; SSE-NEXT: movaps 448(%rdi), %xmm11
1451 ; SSE-NEXT: movaps %xmm11, (%rsp) # 16-byte Spill
1452 ; SSE-NEXT: movaps 240(%rdi), %xmm7
1453 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1454 ; SSE-NEXT: movaps 272(%rdi), %xmm3
1455 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1456 ; SSE-NEXT: movaps 256(%rdi), %xmm13
1457 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1458 ; SSE-NEXT: movaps 48(%rdi), %xmm9
1459 ; SSE-NEXT: movaps 80(%rdi), %xmm1
1460 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1461 ; SSE-NEXT: movaps 64(%rdi), %xmm12
1462 ; SSE-NEXT: movaps %xmm12, %xmm0
1463 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
1464 ; SSE-NEXT: movaps %xmm9, %xmm1
1465 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1466 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1467 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1468 ; SSE-NEXT: movaps %xmm13, %xmm0
1469 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[1,0]
1470 ; SSE-NEXT: movaps %xmm7, %xmm1
1471 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1472 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1473 ; SSE-NEXT: movaps %xmm11, %xmm0
1474 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm5[1,0]
1475 ; SSE-NEXT: movaps %xmm6, %xmm1
1476 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1477 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1478 ; SSE-NEXT: movaps %xmm10, %xmm0
1479 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[1,0]
1480 ; SSE-NEXT: movaps %xmm2, %xmm1
1481 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1482 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1483 ; SSE-NEXT: movaps 16(%rdi), %xmm0
1484 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1485 ; SSE-NEXT: movaps 32(%rdi), %xmm1
1486 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1487 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
1488 ; SSE-NEXT: movaps (%rdi), %xmm1
1489 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1490 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1491 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1492 ; SSE-NEXT: movaps 224(%rdi), %xmm1
1493 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1494 ; SSE-NEXT: movaps 208(%rdi), %xmm0
1495 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1496 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
1497 ; SSE-NEXT: movaps 192(%rdi), %xmm1
1498 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1499 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1500 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1501 ; SSE-NEXT: movaps 416(%rdi), %xmm1
1502 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1503 ; SSE-NEXT: movaps 400(%rdi), %xmm0
1504 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1505 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
1506 ; SSE-NEXT: movaps 384(%rdi), %xmm1
1507 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1508 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1509 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1510 ; SSE-NEXT: movaps 608(%rdi), %xmm1
1511 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1512 ; SSE-NEXT: movaps 592(%rdi), %xmm0
1513 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1514 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
1515 ; SSE-NEXT: movaps 576(%rdi), %xmm1
1516 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1517 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1518 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1519 ; SSE-NEXT: movaps 176(%rdi), %xmm10
1520 ; SSE-NEXT: movaps 160(%rdi), %xmm8
1521 ; SSE-NEXT: movaps %xmm8, %xmm0
1522 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm10[1,0]
1523 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1524 ; SSE-NEXT: movaps 144(%rdi), %xmm2
1525 ; SSE-NEXT: movaps %xmm2, %xmm1
1526 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1527 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1528 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1529 ; SSE-NEXT: movaps 368(%rdi), %xmm1
1530 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1531 ; SSE-NEXT: movaps 352(%rdi), %xmm15
1532 ; SSE-NEXT: movaps %xmm15, %xmm0
1533 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
1534 ; SSE-NEXT: movaps 336(%rdi), %xmm14
1535 ; SSE-NEXT: movaps %xmm14, %xmm1
1536 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1537 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1538 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1539 ; SSE-NEXT: movaps 560(%rdi), %xmm1
1540 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1541 ; SSE-NEXT: movaps 544(%rdi), %xmm0
1542 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1543 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
1544 ; SSE-NEXT: movaps 528(%rdi), %xmm1
1545 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1546 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1547 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1548 ; SSE-NEXT: movaps 752(%rdi), %xmm1
1549 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1550 ; SSE-NEXT: movaps 736(%rdi), %xmm0
1551 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1552 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
1553 ; SSE-NEXT: movaps 720(%rdi), %xmm1
1554 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1555 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
1556 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1557 ; SSE-NEXT: movaps 128(%rdi), %xmm6
1558 ; SSE-NEXT: movaps 112(%rdi), %xmm4
1559 ; SSE-NEXT: movaps %xmm4, %xmm1
1560 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm6[1,0]
1561 ; SSE-NEXT: movaps 96(%rdi), %xmm3
1562 ; SSE-NEXT: movaps %xmm3, %xmm7
1563 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1564 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm1[0,2]
1565 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1566 ; SSE-NEXT: movaps 320(%rdi), %xmm13
1567 ; SSE-NEXT: movaps 304(%rdi), %xmm11
1568 ; SSE-NEXT: movaps %xmm11, %xmm1
1569 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm13[1,0]
1570 ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1571 ; SSE-NEXT: movaps 288(%rdi), %xmm5
1572 ; SSE-NEXT: movaps %xmm5, %xmm7
1573 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1574 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm1[0,2]
1575 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1576 ; SSE-NEXT: movaps 512(%rdi), %xmm0
1577 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1578 ; SSE-NEXT: movaps 496(%rdi), %xmm1
1579 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1580 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,0]
1581 ; SSE-NEXT: movaps 480(%rdi), %xmm7
1582 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1583 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm1[0,2]
1584 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1585 ; SSE-NEXT: movaps 704(%rdi), %xmm7
1586 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1587 ; SSE-NEXT: movaps 688(%rdi), %xmm1
1588 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1589 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm7[1,0]
1590 ; SSE-NEXT: movaps 672(%rdi), %xmm7
1591 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1592 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm1[0,2]
1593 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1594 ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1595 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm12[0,0]
1596 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
1597 ; SSE-NEXT: # xmm12 = xmm12[3,1],mem[2,3]
1598 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm12[0,2]
1599 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1600 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1601 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm8[0,0]
1602 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,1],xmm10[2,3]
1603 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm8[0,2]
1604 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1605 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1606 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm4[0,0]
1607 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm6[2,3]
1608 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
1609 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1610 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1611 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1612 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm1[0,0]
1613 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1614 ; SSE-NEXT: # xmm1 = xmm1[3,1],mem[2,3]
1615 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm1[0,2]
1616 ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1617 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
1618 ; SSE-NEXT: movaps %xmm9, %xmm0
1619 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1620 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[0,0]
1621 ; SSE-NEXT: movaps %xmm3, %xmm1
1622 ; SSE-NEXT: movaps %xmm3, %xmm12
1623 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1624 ; SSE-NEXT: # xmm1 = xmm1[3,1],mem[2,3]
1625 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1626 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1627 ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1628 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,0],xmm15[0,0]
1629 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
1630 ; SSE-NEXT: # xmm15 = xmm15[3,1],mem[2,3]
1631 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,2],xmm15[0,2]
1632 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1633 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1634 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm11[0,0]
1635 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[3,1],xmm13[2,3]
1636 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm11[0,2]
1637 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1638 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1639 ; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
1640 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1641 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1642 ; SSE-NEXT: # xmm1 = xmm1[3,1],mem[2,3]
1643 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1644 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1645 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1646 ; SSE-NEXT: movaps %xmm7, %xmm0
1647 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1648 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[0,0]
1649 ; SSE-NEXT: movaps %xmm4, %xmm1
1650 ; SSE-NEXT: movaps %xmm4, %xmm8
1651 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1652 ; SSE-NEXT: # xmm1 = xmm1[3,1],mem[2,3]
1653 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1654 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1655 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1656 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1657 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1658 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1659 ; SSE-NEXT: # xmm1 = xmm1[3,1],mem[2,3]
1660 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1661 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1662 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1663 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1664 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1665 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1666 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[2,3]
1667 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1668 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1669 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1670 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1671 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1672 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1673 ; SSE-NEXT: # xmm1 = xmm1[3,1],mem[2,3]
1674 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1675 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1676 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1677 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
1678 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm14[0,0]
1679 ; SSE-NEXT: movaps %xmm14, %xmm1
1680 ; SSE-NEXT: movaps %xmm14, %xmm3
1681 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1682 ; SSE-NEXT: # xmm1 = xmm1[3,1],mem[2,3]
1683 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1684 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1685 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1686 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1687 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1688 ; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1689 ; SSE-NEXT: # xmm1 = xmm1[3,1],mem[2,3]
1690 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
1691 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1692 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
1693 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1694 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,0],xmm1[0,0]
1695 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1696 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm4[2,3]
1697 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,2],xmm1[0,2]
1698 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1699 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm2[2,3,2,3]
1700 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1701 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[0,0]
1702 ; SSE-NEXT: movaps %xmm2, %xmm11
1703 ; SSE-NEXT: movaps %xmm0, %xmm1
1704 ; SSE-NEXT: movaps %xmm0, %xmm2
1705 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1706 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
1707 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm1[0,2]
1708 ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1709 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
1710 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1]
1711 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,1],xmm0[0,3]
1712 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1713 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1714 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
1715 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1716 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
1717 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1718 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1719 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,3]
1720 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1721 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1722 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1723 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
1724 ; SSE-NEXT: # xmm15 = mem[2,3,2,3]
1725 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
1726 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm6[0,3]
1727 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1728 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1729 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
1730 ; SSE-NEXT: # xmm13 = mem[2,3,2,3]
1731 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
1732 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
1733 ; SSE-NEXT: # xmm13 = xmm13[0,1],mem[0,3]
1734 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,1,1]
1735 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm9[2,3,2,3]
1736 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
1737 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
1738 ; SSE-NEXT: # xmm12 = xmm12[0,1],mem[0,3]
1739 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1740 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1741 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
1742 ; SSE-NEXT: # xmm11 = mem[2,3,2,3]
1743 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
1744 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
1745 ; SSE-NEXT: # xmm11 = xmm11[0,1],mem[0,3]
1746 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1747 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1748 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
1749 ; SSE-NEXT: # xmm10 = mem[2,3,2,3]
1750 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
1751 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
1752 ; SSE-NEXT: # xmm10 = xmm10[0,1],mem[0,3]
1753 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1754 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1755 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
1756 ; SSE-NEXT: # xmm9 = mem[2,3,2,3]
1757 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
1758 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
1759 ; SSE-NEXT: # xmm9 = xmm9[0,1],mem[0,3]
1760 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
1761 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm7[2,3,2,3]
1762 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
1763 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
1764 ; SSE-NEXT: # xmm8 = xmm8[0,1],mem[0,3]
1765 ; SSE-NEXT: pshufd $85, (%rsp), %xmm0 # 16-byte Folded Reload
1766 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1767 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1768 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
1769 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
1770 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1771 ; SSE-NEXT: # xmm7 = xmm7[0,1],mem[0,3]
1772 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1773 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1774 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1775 ; SSE-NEXT: # xmm6 = mem[2,3,2,3]
1776 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
1777 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,3]
1778 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1779 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1780 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
1781 ; SSE-NEXT: # xmm5 = mem[2,3,2,3]
1782 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
1783 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
1784 ; SSE-NEXT: # xmm5 = xmm5[0,1],mem[0,3]
1785 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
1786 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
1787 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
1788 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
1789 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
1790 ; SSE-NEXT: # xmm3 = xmm3[0,1],mem[0,3]
1791 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1792 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1793 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1794 ; SSE-NEXT: # xmm2 = mem[2,3,2,3]
1795 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1796 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1797 ; SSE-NEXT: # xmm2 = xmm2[0,1],mem[0,3]
1798 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1799 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1800 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1801 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
1802 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1803 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0,3]
1804 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
1805 ; SSE-NEXT: # xmm4 = mem[1,1,1,1]
1806 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1807 ; SSE-NEXT: # xmm0 = mem[2,3,2,3]
1808 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
1809 ; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1810 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,3]
1811 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1812 ; SSE-NEXT: movaps %xmm4, 224(%rsi)
1813 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1814 ; SSE-NEXT: movaps %xmm4, 160(%rsi)
1815 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1816 ; SSE-NEXT: movaps %xmm4, 96(%rsi)
1817 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1818 ; SSE-NEXT: movaps %xmm4, 32(%rsi)
1819 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1820 ; SSE-NEXT: movaps %xmm4, 240(%rsi)
1821 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1822 ; SSE-NEXT: movaps %xmm4, 176(%rsi)
1823 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1824 ; SSE-NEXT: movaps %xmm4, 112(%rsi)
1825 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1826 ; SSE-NEXT: movaps %xmm4, 48(%rsi)
1827 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1828 ; SSE-NEXT: movaps %xmm4, 192(%rsi)
1829 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1830 ; SSE-NEXT: movaps %xmm4, 128(%rsi)
1831 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1832 ; SSE-NEXT: movaps %xmm4, 64(%rsi)
1833 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1834 ; SSE-NEXT: movaps %xmm4, (%rsi)
1835 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1836 ; SSE-NEXT: movaps %xmm4, 208(%rsi)
1837 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1838 ; SSE-NEXT: movaps %xmm4, 144(%rsi)
1839 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1840 ; SSE-NEXT: movaps %xmm4, 80(%rsi)
1841 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1842 ; SSE-NEXT: movaps %xmm4, 16(%rsi)
1843 ; SSE-NEXT: movaps %xmm14, 224(%rdx)
1844 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1845 ; SSE-NEXT: movaps %xmm4, 240(%rdx)
1846 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1847 ; SSE-NEXT: movaps %xmm4, 192(%rdx)
1848 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1849 ; SSE-NEXT: movaps %xmm4, 208(%rdx)
1850 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1851 ; SSE-NEXT: movaps %xmm4, 160(%rdx)
1852 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1853 ; SSE-NEXT: movaps %xmm4, 176(%rdx)
1854 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1855 ; SSE-NEXT: movaps %xmm4, 128(%rdx)
1856 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1857 ; SSE-NEXT: movaps %xmm4, 144(%rdx)
1858 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1859 ; SSE-NEXT: movaps %xmm4, 96(%rdx)
1860 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1861 ; SSE-NEXT: movaps %xmm4, 112(%rdx)
1862 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1863 ; SSE-NEXT: movaps %xmm4, 64(%rdx)
1864 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1865 ; SSE-NEXT: movaps %xmm4, 80(%rdx)
1866 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1867 ; SSE-NEXT: movaps %xmm4, 32(%rdx)
1868 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1869 ; SSE-NEXT: movaps %xmm4, 48(%rdx)
1870 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1871 ; SSE-NEXT: movaps %xmm4, (%rdx)
1872 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1873 ; SSE-NEXT: movaps %xmm4, 16(%rdx)
1874 ; SSE-NEXT: movaps %xmm0, 240(%rcx)
1875 ; SSE-NEXT: movaps %xmm1, 224(%rcx)
1876 ; SSE-NEXT: movaps %xmm2, 208(%rcx)
1877 ; SSE-NEXT: movaps %xmm3, 192(%rcx)
1878 ; SSE-NEXT: movaps %xmm5, 176(%rcx)
1879 ; SSE-NEXT: movaps %xmm6, 160(%rcx)
1880 ; SSE-NEXT: movaps %xmm7, 144(%rcx)
1881 ; SSE-NEXT: movaps %xmm8, 128(%rcx)
1882 ; SSE-NEXT: movaps %xmm9, 112(%rcx)
1883 ; SSE-NEXT: movaps %xmm10, 96(%rcx)
1884 ; SSE-NEXT: movaps %xmm11, 80(%rcx)
1885 ; SSE-NEXT: movaps %xmm12, 64(%rcx)
1886 ; SSE-NEXT: movaps %xmm13, 48(%rcx)
1887 ; SSE-NEXT: movaps %xmm15, 32(%rcx)
1888 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1889 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
1890 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1891 ; SSE-NEXT: movaps %xmm0, (%rcx)
1892 ; SSE-NEXT: addq $1112, %rsp # imm = 0x458
1895 ; AVX1-ONLY-LABEL: load_i32_stride3_vf64:
1896 ; AVX1-ONLY: # %bb.0:
1897 ; AVX1-ONLY-NEXT: subq $1384, %rsp # imm = 0x568
1898 ; AVX1-ONLY-NEXT: vmovaps 544(%rdi), %ymm2
1899 ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %ymm3
1900 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1901 ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm4
1902 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1903 ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %ymm5
1904 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1905 ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %ymm6
1906 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1907 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %ymm7
1908 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1909 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm8
1910 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1911 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm9
1912 ; AVX1-ONLY-NEXT: vmovups %ymm9, (%rsp) # 32-byte Spill
1913 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm0
1914 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1915 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm9[1],ymm0[2,3],ymm9[4],ymm0[5,6],ymm9[7]
1916 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm1
1917 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm9[1,3],ymm1[6,5],ymm9[5,7]
1918 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm1[0,2],ymm0[4,7],ymm1[4,6]
1919 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm8[2,3,0,1]
1920 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1921 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,0],ymm1[2,0],ymm8[5,4],ymm1[6,4]
1922 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
1923 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1924 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1925 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6],ymm6[7]
1926 ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm1
1927 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm6[1,3],ymm1[6,5],ymm6[5,7]
1928 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm1[0,2],ymm0[4,7],ymm1[4,6]
1929 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm5[2,3,0,1]
1930 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[1,0],ymm14[2,0],ymm5[5,4],ymm14[6,4]
1931 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
1932 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1933 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1934 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
1935 ; AVX1-ONLY-NEXT: vmovaps 496(%rdi), %xmm1
1936 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm3[1,3],ymm1[6,5],ymm3[5,7]
1937 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm1[0,2],ymm0[4,7],ymm1[4,6]
1938 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1939 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm2[2,3,0,1]
1940 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,0],ymm3[2,0],ymm2[5,4],ymm3[6,4]
1941 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
1942 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1943 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1944 ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %ymm2
1945 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1946 ; AVX1-ONLY-NEXT: vmovaps 688(%rdi), %xmm0
1947 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm2[1,3],ymm0[6,5],ymm2[5,7]
1948 ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %ymm1
1949 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1950 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
1951 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,3],ymm0[0,2],ymm1[4,7],ymm0[4,6]
1952 ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %ymm1
1953 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1954 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm1[2,3,0,1]
1955 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,0],ymm15[2,0],ymm1[5,4],ymm15[6,4]
1956 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
1957 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1958 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1959 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm7
1960 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm0
1961 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm7[1,3],ymm0[6,5],ymm7[5,7]
1962 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm1
1963 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1964 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3],ymm7[4],ymm1[5,6],ymm7[7]
1965 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,3],ymm0[0,2],ymm1[4,7],ymm0[4,6]
1966 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm4
1967 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm4[2,3,0,1]
1968 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1969 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[1,0],ymm1[2,0],ymm4[5,4],ymm1[6,4]
1970 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1971 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
1972 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1973 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1974 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm6
1975 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm0
1976 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm6[1,3],ymm0[6,5],ymm6[5,7]
1977 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm1
1978 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1979 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3],ymm6[4],ymm1[5,6],ymm6[7]
1980 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,3],ymm0[0,2],ymm1[4,7],ymm0[4,6]
1981 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %ymm5
1982 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm5[2,3,0,1]
1983 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1984 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[1,0],ymm1[2,0],ymm5[5,4],ymm1[6,4]
1985 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1986 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
1987 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1988 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1989 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm12
1990 ; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm0
1991 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm12[1,3],ymm0[6,5],ymm12[5,7]
1992 ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm1
1993 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1994 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm12[1],ymm1[2,3],ymm12[4],ymm1[5,6],ymm12[7]
1995 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,3],ymm0[0,2],ymm1[4,7],ymm0[4,6]
1996 ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm8
1997 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm8[2,3,0,1]
1998 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,0],ymm9[2,0],ymm8[5,4],ymm9[6,4]
1999 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2000 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
2001 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2002 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2003 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %ymm10
2004 ; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm0
2005 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm10[1,3],ymm0[6,5],ymm10[5,7]
2006 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %ymm1
2007 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2008 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm10[1],ymm1[2,3],ymm10[4],ymm1[5,6],ymm10[7]
2009 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,3],ymm0[0,2],ymm1[4,7],ymm0[4,6]
2010 ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %ymm2
2011 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm2[2,3,0,1]
2012 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,0],ymm11[2,0],ymm2[5,4],ymm11[6,4]
2013 ; AVX1-ONLY-NEXT: vmovaps %ymm2, %ymm13
2014 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2015 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
2016 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2017 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2018 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2019 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2020 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
2021 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm1[0,0],ymm0[2,0],ymm1[4,4],ymm0[6,4]
2022 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm0
2023 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2024 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
2025 ; AVX1-ONLY-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
2026 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
2027 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,2],ymm0[0,3],ymm1[5,6],ymm0[4,7]
2028 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
2029 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm2[5,6,7]
2030 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2031 ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2032 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2033 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm14[3,0],ymm0[6,4],ymm14[7,4]
2034 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm14[0,0],ymm0[2,0],ymm14[4,4],ymm0[6,4]
2035 ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm2
2036 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2037 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2038 ; AVX1-ONLY-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
2039 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
2040 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,2],ymm2[0,3],ymm1[5,6],ymm2[4,7]
2041 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
2042 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
2043 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2044 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2045 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2046 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[3,0],ymm0[6,4],ymm3[7,4]
2047 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm3[0,0],ymm0[2,0],ymm3[4,4],ymm0[6,4]
2048 ; AVX1-ONLY-NEXT: vmovaps 496(%rdi), %xmm3
2049 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2050 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2051 ; AVX1-ONLY-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
2052 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
2053 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,2],ymm3[0,3],ymm1[5,6],ymm3[4,7]
2054 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
2055 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
2056 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2057 ; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2058 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2059 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm15[3,0],ymm0[6,4],ymm15[7,4]
2060 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm15[0,0],ymm0[2,0],ymm15[4,4],ymm0[6,4]
2061 ; AVX1-ONLY-NEXT: vmovaps 688(%rdi), %xmm3
2062 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2063 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2064 ; AVX1-ONLY-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
2065 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
2066 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,2],ymm3[0,3],ymm1[5,6],ymm3[4,7]
2067 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
2068 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
2069 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2070 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2071 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm4[2,0],ymm1[3,0],ymm4[6,4],ymm1[7,4]
2072 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm0[2,0],ymm1[4,4],ymm0[6,4]
2073 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm4
2074 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2075 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1],ymm7[2],ymm0[3,4],ymm7[5],ymm0[6,7]
2076 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,2],ymm4[0,3],ymm2[5,6],ymm4[4,7]
2077 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2,3,1,4,6,7,5]
2078 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
2079 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2080 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2081 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[2,0],ymm2[3,0],ymm5[6,4],ymm2[7,4]
2082 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,0],ymm1[2,0],ymm2[4,4],ymm1[6,4]
2083 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm5
2084 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2085 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7]
2086 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[1,2],ymm5[0,3],ymm3[5,6],ymm5[4,7]
2087 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,2,3,1,4,6,7,5]
2088 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
2089 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2090 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2091 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm8[2,0],ymm9[3,0],ymm8[6,4],ymm9[7,4]
2092 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm9[0,0],ymm2[2,0],ymm9[4,4],ymm2[6,4]
2093 ; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm8
2094 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2095 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm2[0,1],ymm12[2],ymm2[3,4],ymm12[5],ymm2[6,7]
2096 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[1,2],ymm8[0,3],ymm15[5,6],ymm8[4,7]
2097 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,2,3,1,4,6,7,5]
2098 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm15[0,1,2,3,4],ymm3[5,6,7]
2099 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2100 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm13[2,0],ymm11[3,0],ymm13[6,4],ymm11[7,4]
2101 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm11[0,0],ymm3[2,0],ymm11[4,4],ymm3[6,4]
2102 ; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm9
2103 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2104 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm3[0,1],ymm10[2],ymm3[3,4],ymm10[5],ymm3[6,7]
2105 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[1,2],ymm9[0,3],ymm14[5,6],ymm9[4,7]
2106 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,2,3,1,4,6,7,5]
2107 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm15[5,6,7]
2108 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2109 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm15 # 32-byte Reload
2110 ; AVX1-ONLY-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
2111 ; AVX1-ONLY-NEXT: # ymm14 = ymm15[0,1],mem[2],ymm15[3,4],mem[5],ymm15[6,7]
2112 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
2113 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm13[1,0],ymm14[2,0],ymm13[5,4],ymm14[6,4]
2114 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm15[0,3],ymm14[6,4],ymm15[4,7]
2115 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2116 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
2117 ; AVX1-ONLY-NEXT: # ymm15 = ymm15[0,1],mem[0,3],ymm15[4,5],mem[4,7]
2118 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm15[5,6,7]
2119 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm7[0,1],ymm0[2],ymm7[3,4],ymm0[5],ymm7[6,7]
2120 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm4[1,0],ymm14[2,0],ymm4[5,4],ymm14[6,4]
2121 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm7[0,3],ymm0[6,4],ymm7[4,7]
2122 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2123 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm7 # 32-byte Folded Reload
2124 ; AVX1-ONLY-NEXT: # ymm7 = ymm4[0,1],mem[0,3],ymm4[4,5],mem[4,7]
2125 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3,4],ymm7[5,6,7]
2126 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2127 ; AVX1-ONLY-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
2128 ; AVX1-ONLY-NEXT: # ymm0 = ymm14[0,1],mem[2],ymm14[3,4],mem[5],ymm14[6,7]
2129 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2130 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm4[1,0],ymm0[2,0],ymm4[5,4],ymm0[6,4]
2131 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm14[0,3],ymm0[6,4],ymm14[4,7]
2132 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2133 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
2134 ; AVX1-ONLY-NEXT: # ymm14 = ymm4[0,1],mem[0,3],ymm4[4,5],mem[4,7]
2135 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3,4],ymm14[5,6,7]
2136 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7]
2137 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm5[1,0],ymm0[2,0],ymm5[5,4],ymm0[6,4]
2138 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm6[0,3],ymm0[6,4],ymm6[4,7]
2139 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2140 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
2141 ; AVX1-ONLY-NEXT: # ymm1 = ymm1[0,1],mem[0,3],ymm1[4,5],mem[4,7]
2142 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
2143 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2144 ; AVX1-ONLY-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm1 # 32-byte Folded Reload
2145 ; AVX1-ONLY-NEXT: # ymm1 = ymm6[0,1],mem[2],ymm6[3,4],mem[5],ymm6[6,7]
2146 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2147 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[1,0],ymm1[2,0],ymm4[5,4],ymm1[6,4]
2148 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm6[0,3],ymm1[6,4],ymm6[4,7]
2149 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2150 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm6 # 32-byte Folded Reload
2151 ; AVX1-ONLY-NEXT: # ymm6 = ymm4[0,1],mem[0,3],ymm4[4,5],mem[4,7]
2152 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm6[5,6,7]
2153 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm12[0,1],ymm2[2],ymm12[3,4],ymm2[5],ymm12[6,7]
2154 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm8[1,0],ymm6[2,0],ymm8[5,4],ymm6[6,4]
2155 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm12[0,3],ymm2[6,4],ymm12[4,7]
2156 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2157 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
2158 ; AVX1-ONLY-NEXT: # ymm5 = ymm4[0,1],mem[0,3],ymm4[4,5],mem[4,7]
2159 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
2160 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2161 ; AVX1-ONLY-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm5 # 32-byte Folded Reload
2162 ; AVX1-ONLY-NEXT: # ymm5 = ymm6[0,1],mem[2],ymm6[3,4],mem[5],ymm6[6,7]
2163 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2164 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm4[1,0],ymm5[2,0],ymm4[5,4],ymm5[6,4]
2165 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0],ymm6[0,3],ymm5[6,4],ymm6[4,7]
2166 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2167 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm6 # 32-byte Folded Reload
2168 ; AVX1-ONLY-NEXT: # ymm6 = ymm4[0,1],mem[0,3],ymm4[4,5],mem[4,7]
2169 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
2170 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm10[0,1],ymm3[2],ymm10[3,4],ymm3[5],ymm10[6,7]
2171 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm9[1,0],ymm6[2,0],ymm9[5,4],ymm6[6,4]
2172 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0],ymm10[0,3],ymm3[6,4],ymm10[4,7]
2173 ; AVX1-ONLY-NEXT: vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm4 # 32-byte Folded Reload
2174 ; AVX1-ONLY-NEXT: # ymm4 = ymm11[0,1],mem[0,3],ymm11[4,5],mem[4,7]
2175 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6,7]
2176 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2177 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rsi)
2178 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2179 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%rsi)
2180 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2181 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rsi)
2182 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2183 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rsi)
2184 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2185 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%rsi)
2186 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2187 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rsi)
2188 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2189 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rsi)
2190 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2191 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rsi)
2192 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2193 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rdx)
2194 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2195 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%rdx)
2196 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2197 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rdx)
2198 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2199 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rdx)
2200 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2201 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%rdx)
2202 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2203 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rdx)
2204 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2205 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rdx)
2206 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2207 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rdx)
2208 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 192(%rcx)
2209 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 224(%rcx)
2210 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 128(%rcx)
2211 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%rcx)
2212 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rcx)
2213 ; AVX1-ONLY-NEXT: vmovaps %ymm14, 96(%rcx)
2214 ; AVX1-ONLY-NEXT: vmovaps %ymm7, (%rcx)
2215 ; AVX1-ONLY-NEXT: vmovaps %ymm13, 32(%rcx)
2216 ; AVX1-ONLY-NEXT: addq $1384, %rsp # imm = 0x568
2217 ; AVX1-ONLY-NEXT: vzeroupper
2218 ; AVX1-ONLY-NEXT: retq
2220 ; AVX2-SLOW-LABEL: load_i32_stride3_vf64:
2221 ; AVX2-SLOW: # %bb.0:
2222 ; AVX2-SLOW-NEXT: subq $1032, %rsp # imm = 0x408
2223 ; AVX2-SLOW-NEXT: vmovaps 704(%rdi), %ymm3
2224 ; AVX2-SLOW-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2225 ; AVX2-SLOW-NEXT: vmovaps 512(%rdi), %ymm4
2226 ; AVX2-SLOW-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2227 ; AVX2-SLOW-NEXT: vmovaps 480(%rdi), %ymm6
2228 ; AVX2-SLOW-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2229 ; AVX2-SLOW-NEXT: vmovaps 544(%rdi), %ymm5
2230 ; AVX2-SLOW-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2231 ; AVX2-SLOW-NEXT: vmovaps 320(%rdi), %ymm7
2232 ; AVX2-SLOW-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2233 ; AVX2-SLOW-NEXT: vmovaps 288(%rdi), %ymm10
2234 ; AVX2-SLOW-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2235 ; AVX2-SLOW-NEXT: vmovaps 352(%rdi), %ymm9
2236 ; AVX2-SLOW-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2237 ; AVX2-SLOW-NEXT: vmovaps 128(%rdi), %ymm13
2238 ; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm14
2239 ; AVX2-SLOW-NEXT: vmovaps 160(%rdi), %ymm1
2240 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2241 ; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm0 = [2,5,2,5,2,5,2,5]
2242 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm0, %ymm1
2243 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm14[0],ymm13[1],ymm14[2,3],ymm13[4],ymm14[5,6],ymm13[7]
2244 ; AVX2-SLOW-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2245 ; AVX2-SLOW-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2246 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm8 = [0,3,6,1,4,7,u,u]
2247 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm8, %ymm2
2248 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
2249 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2250 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm0, %ymm1
2251 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6],ymm7[7]
2252 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm8, %ymm2
2253 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
2254 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2255 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm0, %ymm1
2256 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7]
2257 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm8, %ymm2
2258 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
2259 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2260 ; AVX2-SLOW-NEXT: vmovaps 672(%rdi), %ymm1
2261 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2262 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
2263 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm8, %ymm1
2264 ; AVX2-SLOW-NEXT: vmovaps 736(%rdi), %ymm2
2265 ; AVX2-SLOW-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
2266 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm0, %ymm2
2267 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
2268 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2269 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm1
2270 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2271 ; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm3
2272 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
2273 ; AVX2-SLOW-NEXT: vmovaps %ymm3, %ymm5
2274 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm8, %ymm1
2275 ; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm2
2276 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2277 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm0, %ymm4
2278 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
2279 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2280 ; AVX2-SLOW-NEXT: vmovaps 224(%rdi), %ymm7
2281 ; AVX2-SLOW-NEXT: vmovaps 192(%rdi), %ymm15
2282 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0],ymm7[1],ymm15[2,3],ymm7[4],ymm15[5,6],ymm7[7]
2283 ; AVX2-SLOW-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2284 ; AVX2-SLOW-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2285 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm8, %ymm1
2286 ; AVX2-SLOW-NEXT: vmovaps 256(%rdi), %ymm2
2287 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2288 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm0, %ymm6
2289 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
2290 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2291 ; AVX2-SLOW-NEXT: vmovaps 416(%rdi), %ymm1
2292 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2293 ; AVX2-SLOW-NEXT: vmovaps 384(%rdi), %ymm3
2294 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
2295 ; AVX2-SLOW-NEXT: vmovaps %ymm3, %ymm6
2296 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm8, %ymm1
2297 ; AVX2-SLOW-NEXT: vmovaps 448(%rdi), %ymm2
2298 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2299 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm0, %ymm12
2300 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm12[6,7]
2301 ; AVX2-SLOW-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2302 ; AVX2-SLOW-NEXT: vmovaps 608(%rdi), %ymm9
2303 ; AVX2-SLOW-NEXT: vmovaps 576(%rdi), %ymm11
2304 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm11[0],ymm9[1],ymm11[2,3],ymm9[4],ymm11[5,6],ymm9[7]
2305 ; AVX2-SLOW-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2306 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm8, %ymm1
2307 ; AVX2-SLOW-NEXT: vmovaps 640(%rdi), %ymm2
2308 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2309 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm0, %ymm0
2310 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2311 ; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2312 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm13[2],ymm14[3,4],ymm13[5],ymm14[6,7]
2313 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm1 = [1,4,7,2,5,u,u,u]
2314 ; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm2
2315 ; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,0,3,6,0,0,3,6]
2316 ; AVX2-SLOW-NEXT: # ymm0 = mem[0,1,0,1]
2317 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2318 ; AVX2-SLOW-NEXT: vpermps %ymm10, %ymm0, %ymm14
2319 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2320 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2321 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
2322 ; AVX2-SLOW-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm2 # 32-byte Folded Reload
2323 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1],ymm12[2],mem[3,4],ymm12[5],mem[6,7]
2324 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm1, %ymm2
2325 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
2326 ; AVX2-SLOW-NEXT: vpermps %ymm13, %ymm0, %ymm14
2327 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2328 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2329 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2330 ; AVX2-SLOW-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
2331 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
2332 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm1, %ymm2
2333 ; AVX2-SLOW-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
2334 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2335 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2336 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2337 ; AVX2-SLOW-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
2338 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
2339 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm1, %ymm2
2340 ; AVX2-SLOW-NEXT: vpermps (%rsp), %ymm0, %ymm14 # 32-byte Folded Reload
2341 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2342 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2343 ; AVX2-SLOW-NEXT: vmovaps %ymm5, %ymm4
2344 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2345 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7]
2346 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm1, %ymm2
2347 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2348 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm0, %ymm14
2349 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2350 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2351 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1],ymm7[2],ymm15[3,4],ymm7[5],ymm15[6,7]
2352 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm1, %ymm2
2353 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2354 ; AVX2-SLOW-NEXT: vpermps %ymm15, %ymm0, %ymm14
2355 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2356 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2357 ; AVX2-SLOW-NEXT: vmovaps %ymm6, %ymm7
2358 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2359 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
2360 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm1, %ymm2
2361 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2362 ; AVX2-SLOW-NEXT: vpermps %ymm8, %ymm0, %ymm14
2363 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2364 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2365 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm11[0,1],ymm9[2],ymm11[3,4],ymm9[5],ymm11[6,7]
2366 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm1, %ymm1
2367 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
2368 ; AVX2-SLOW-NEXT: vpermps %ymm9, %ymm0, %ymm0
2369 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
2370 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2371 ; AVX2-SLOW-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
2372 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
2373 ; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm14 = [2,5,0,3,6,u,u,u]
2374 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm14, %ymm1
2375 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,1,0,3,4,5,4,7]
2376 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
2377 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3,4],ymm2[5,6,7]
2378 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
2379 ; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm14, %ymm1
2380 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm5[0,1,0,3,4,5,4,7]
2381 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
2382 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
2383 ; AVX2-SLOW-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm2 # 32-byte Folded Reload
2384 ; AVX2-SLOW-NEXT: # ymm2 = ymm12[0,1],mem[2],ymm12[3,4],mem[5],ymm12[6,7]
2385 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm14, %ymm2
2386 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm3 = ymm13[0,1,0,3,4,5,4,7]
2387 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
2388 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3,4],ymm3[5,6,7]
2389 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2390 ; AVX2-SLOW-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
2391 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
2392 ; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm14, %ymm2
2393 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm15[0,1,0,3,4,5,4,7]
2394 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
2395 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
2396 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2397 ; AVX2-SLOW-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
2398 ; AVX2-SLOW-NEXT: # ymm4 = ymm4[0,1],mem[2],ymm4[3,4],mem[5],ymm4[6,7]
2399 ; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm14, %ymm4
2400 ; AVX2-SLOW-NEXT: vpermilps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
2401 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,1,0,3,4,5,4,7]
2402 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
2403 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5,6,7]
2404 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7]
2405 ; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm14, %ymm5
2406 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm6 = ymm8[0,1,0,3,4,5,4,7]
2407 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
2408 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
2409 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2410 ; AVX2-SLOW-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
2411 ; AVX2-SLOW-NEXT: # ymm6 = ymm6[0,1],mem[2],ymm6[3,4],mem[5],ymm6[6,7]
2412 ; AVX2-SLOW-NEXT: vpermps %ymm6, %ymm14, %ymm6
2413 ; AVX2-SLOW-NEXT: vpermilps $196, (%rsp), %ymm7 # 32-byte Folded Reload
2414 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,1,0,3,4,5,4,7]
2415 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
2416 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5,6,7]
2417 ; AVX2-SLOW-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm7 # 32-byte Folded Reload
2418 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,1],ymm11[2],mem[3,4],ymm11[5],mem[6,7]
2419 ; AVX2-SLOW-NEXT: vpermps %ymm7, %ymm14, %ymm7
2420 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm8 = ymm9[0,1,0,3,4,5,4,7]
2421 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,3]
2422 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5,6,7]
2423 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2424 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 192(%rsi)
2425 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2426 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 128(%rsi)
2427 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2428 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 64(%rsi)
2429 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2430 ; AVX2-SLOW-NEXT: vmovaps %ymm8, (%rsi)
2431 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2432 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 224(%rsi)
2433 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2434 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 160(%rsi)
2435 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2436 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 96(%rsi)
2437 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2438 ; AVX2-SLOW-NEXT: vmovaps %ymm8, 32(%rsi)
2439 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 192(%rdx)
2440 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2441 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rdx)
2442 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2443 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rdx)
2444 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2445 ; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rdx)
2446 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2447 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rdx)
2448 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2449 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rdx)
2450 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2451 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 96(%rdx)
2452 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2453 ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rdx)
2454 ; AVX2-SLOW-NEXT: vmovaps %ymm7, 192(%rcx)
2455 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 224(%rcx)
2456 ; AVX2-SLOW-NEXT: vmovaps %ymm5, 128(%rcx)
2457 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 160(%rcx)
2458 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 64(%rcx)
2459 ; AVX2-SLOW-NEXT: vmovaps %ymm3, 96(%rcx)
2460 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rcx)
2461 ; AVX2-SLOW-NEXT: vmovaps %ymm10, 32(%rcx)
2462 ; AVX2-SLOW-NEXT: addq $1032, %rsp # imm = 0x408
2463 ; AVX2-SLOW-NEXT: vzeroupper
2464 ; AVX2-SLOW-NEXT: retq
2466 ; AVX2-FAST-LABEL: load_i32_stride3_vf64:
2467 ; AVX2-FAST: # %bb.0:
2468 ; AVX2-FAST-NEXT: subq $1032, %rsp # imm = 0x408
2469 ; AVX2-FAST-NEXT: vmovaps 704(%rdi), %ymm3
2470 ; AVX2-FAST-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2471 ; AVX2-FAST-NEXT: vmovaps 512(%rdi), %ymm4
2472 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2473 ; AVX2-FAST-NEXT: vmovaps 480(%rdi), %ymm5
2474 ; AVX2-FAST-NEXT: vmovups %ymm5, (%rsp) # 32-byte Spill
2475 ; AVX2-FAST-NEXT: vmovaps 544(%rdi), %ymm7
2476 ; AVX2-FAST-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2477 ; AVX2-FAST-NEXT: vmovaps 320(%rdi), %ymm8
2478 ; AVX2-FAST-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2479 ; AVX2-FAST-NEXT: vmovaps 288(%rdi), %ymm9
2480 ; AVX2-FAST-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2481 ; AVX2-FAST-NEXT: vmovaps 352(%rdi), %ymm10
2482 ; AVX2-FAST-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2483 ; AVX2-FAST-NEXT: vmovaps 128(%rdi), %ymm11
2484 ; AVX2-FAST-NEXT: vmovaps 96(%rdi), %ymm14
2485 ; AVX2-FAST-NEXT: vmovaps 160(%rdi), %ymm1
2486 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2487 ; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm0 = [2,5,2,5,2,5,2,5]
2488 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm0, %ymm1
2489 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm14[0],ymm11[1],ymm14[2,3],ymm11[4],ymm14[5,6],ymm11[7]
2490 ; AVX2-FAST-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2491 ; AVX2-FAST-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2492 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm6 = [0,3,6,1,4,7,u,u]
2493 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm6, %ymm2
2494 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
2495 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2496 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm0, %ymm1
2497 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
2498 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm6, %ymm2
2499 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
2500 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2501 ; AVX2-FAST-NEXT: vpermps %ymm7, %ymm0, %ymm1
2502 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
2503 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm6, %ymm2
2504 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
2505 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2506 ; AVX2-FAST-NEXT: vmovaps 672(%rdi), %ymm1
2507 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2508 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
2509 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm6, %ymm1
2510 ; AVX2-FAST-NEXT: vmovaps 736(%rdi), %ymm2
2511 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2512 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm0, %ymm2
2513 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
2514 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2515 ; AVX2-FAST-NEXT: vmovaps (%rdi), %ymm4
2516 ; AVX2-FAST-NEXT: vmovaps 32(%rdi), %ymm5
2517 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
2518 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm6, %ymm1
2519 ; AVX2-FAST-NEXT: vmovaps 64(%rdi), %ymm2
2520 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2521 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm0, %ymm3
2522 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
2523 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2524 ; AVX2-FAST-NEXT: vmovaps 224(%rdi), %ymm15
2525 ; AVX2-FAST-NEXT: vmovaps 192(%rdi), %ymm1
2526 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2527 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3],ymm15[4],ymm1[5,6],ymm15[7]
2528 ; AVX2-FAST-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2529 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm6, %ymm1
2530 ; AVX2-FAST-NEXT: vmovaps 256(%rdi), %ymm2
2531 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2532 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm0, %ymm7
2533 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
2534 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2535 ; AVX2-FAST-NEXT: vmovaps 416(%rdi), %ymm1
2536 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2537 ; AVX2-FAST-NEXT: vmovaps 384(%rdi), %ymm7
2538 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3],ymm1[4],ymm7[5,6],ymm1[7]
2539 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm6, %ymm1
2540 ; AVX2-FAST-NEXT: vmovaps 448(%rdi), %ymm10
2541 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm0, %ymm12
2542 ; AVX2-FAST-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2543 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm12[6,7]
2544 ; AVX2-FAST-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2545 ; AVX2-FAST-NEXT: vmovaps 608(%rdi), %ymm13
2546 ; AVX2-FAST-NEXT: vmovaps 576(%rdi), %ymm9
2547 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0],ymm13[1],ymm9[2,3],ymm13[4],ymm9[5,6],ymm13[7]
2548 ; AVX2-FAST-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2549 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm6, %ymm1
2550 ; AVX2-FAST-NEXT: vmovaps 640(%rdi), %ymm2
2551 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2552 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm0, %ymm0
2553 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2554 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2555 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm11[2],ymm14[3,4],ymm11[5],ymm14[6,7]
2556 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [1,4,7,2,5,u,u,u]
2557 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm2
2558 ; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,0,3,6,0,0,3,6]
2559 ; AVX2-FAST-NEXT: # ymm0 = mem[0,1,0,1]
2560 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2561 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm0, %ymm14
2562 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2563 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2564 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
2565 ; AVX2-FAST-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm2 # 32-byte Folded Reload
2566 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1],ymm12[2],mem[3,4],ymm12[5],mem[6,7]
2567 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm2
2568 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2569 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm0, %ymm14
2570 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2571 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2572 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2573 ; AVX2-FAST-NEXT: vblendps $219, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
2574 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
2575 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm2
2576 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
2577 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2578 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2579 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2580 ; AVX2-FAST-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
2581 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
2582 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm2
2583 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
2584 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2585 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2586 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
2587 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm2
2588 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2589 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm0, %ymm14
2590 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2591 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2592 ; AVX2-FAST-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
2593 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1],ymm15[2],mem[3,4],ymm15[5],mem[6,7]
2594 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm2
2595 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2596 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm0, %ymm14
2597 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2598 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2599 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2600 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm15[2],ymm7[3,4],ymm15[5],ymm7[6,7]
2601 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm2
2602 ; AVX2-FAST-NEXT: vpermps %ymm10, %ymm0, %ymm14
2603 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2604 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2605 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1],ymm13[2],ymm9[3,4],ymm13[5],ymm9[6,7]
2606 ; AVX2-FAST-NEXT: vmovaps %ymm9, %ymm13
2607 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm1
2608 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
2609 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm0, %ymm0
2610 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
2611 ; AVX2-FAST-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2612 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2613 ; AVX2-FAST-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
2614 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
2615 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm14 = [0,1,0,3,0,1,4,7]
2616 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm14, %ymm0
2617 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [2,5,0,3,6,u,u,u]
2618 ; AVX2-FAST-NEXT: vpermps %ymm2, %ymm1, %ymm2
2619 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0,1,2,3,4],ymm0[5,6,7]
2620 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
2621 ; AVX2-FAST-NEXT: vpermps %ymm3, %ymm14, %ymm2
2622 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
2623 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3,4],ymm2[5,6,7]
2624 ; AVX2-FAST-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload
2625 ; AVX2-FAST-NEXT: # ymm0 = ymm12[0,1],mem[2],ymm12[3,4],mem[5],ymm12[6,7]
2626 ; AVX2-FAST-NEXT: vpermps %ymm11, %ymm14, %ymm2
2627 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
2628 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4],ymm2[5,6,7]
2629 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2630 ; AVX2-FAST-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
2631 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
2632 ; AVX2-FAST-NEXT: vpermps %ymm6, %ymm14, %ymm2
2633 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
2634 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm2[5,6,7]
2635 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2636 ; AVX2-FAST-NEXT: vblendps $36, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
2637 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
2638 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm3 # 32-byte Folded Reload
2639 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
2640 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm3[5,6,7]
2641 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm7[2],ymm15[3,4],ymm7[5],ymm15[6,7]
2642 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm7 # 32-byte Folded Reload
2643 ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
2644 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm7[5,6,7]
2645 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2646 ; AVX2-FAST-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
2647 ; AVX2-FAST-NEXT: # ymm7 = ymm6[0,1],mem[2],ymm6[3,4],mem[5],ymm6[6,7]
2648 ; AVX2-FAST-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm8 # 32-byte Folded Reload
2649 ; AVX2-FAST-NEXT: vpermps %ymm7, %ymm1, %ymm7
2650 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5,6,7]
2651 ; AVX2-FAST-NEXT: vpermps %ymm9, %ymm14, %ymm6
2652 ; AVX2-FAST-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm8 # 32-byte Folded Reload
2653 ; AVX2-FAST-NEXT: # ymm8 = mem[0,1],ymm13[2],mem[3,4],ymm13[5],mem[6,7]
2654 ; AVX2-FAST-NEXT: vpermps %ymm8, %ymm1, %ymm1
2655 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm6[5,6,7]
2656 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2657 ; AVX2-FAST-NEXT: vmovaps %ymm4, 192(%rsi)
2658 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2659 ; AVX2-FAST-NEXT: vmovaps %ymm4, 128(%rsi)
2660 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2661 ; AVX2-FAST-NEXT: vmovaps %ymm4, 64(%rsi)
2662 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2663 ; AVX2-FAST-NEXT: vmovaps %ymm6, (%rsi)
2664 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2665 ; AVX2-FAST-NEXT: vmovaps %ymm6, 224(%rsi)
2666 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2667 ; AVX2-FAST-NEXT: vmovaps %ymm6, 160(%rsi)
2668 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2669 ; AVX2-FAST-NEXT: vmovaps %ymm6, 96(%rsi)
2670 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2671 ; AVX2-FAST-NEXT: vmovaps %ymm6, 32(%rsi)
2672 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2673 ; AVX2-FAST-NEXT: vmovaps %ymm4, 192(%rdx)
2674 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2675 ; AVX2-FAST-NEXT: vmovaps %ymm4, 128(%rdx)
2676 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2677 ; AVX2-FAST-NEXT: vmovaps %ymm4, 64(%rdx)
2678 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2679 ; AVX2-FAST-NEXT: vmovaps %ymm4, (%rdx)
2680 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2681 ; AVX2-FAST-NEXT: vmovaps %ymm4, 224(%rdx)
2682 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2683 ; AVX2-FAST-NEXT: vmovaps %ymm4, 160(%rdx)
2684 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2685 ; AVX2-FAST-NEXT: vmovaps %ymm4, 96(%rdx)
2686 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2687 ; AVX2-FAST-NEXT: vmovaps %ymm4, 32(%rdx)
2688 ; AVX2-FAST-NEXT: vmovaps %ymm1, 192(%rcx)
2689 ; AVX2-FAST-NEXT: vmovaps %ymm7, 224(%rcx)
2690 ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%rcx)
2691 ; AVX2-FAST-NEXT: vmovaps %ymm3, 160(%rcx)
2692 ; AVX2-FAST-NEXT: vmovaps %ymm2, 64(%rcx)
2693 ; AVX2-FAST-NEXT: vmovaps %ymm11, 96(%rcx)
2694 ; AVX2-FAST-NEXT: vmovaps %ymm5, (%rcx)
2695 ; AVX2-FAST-NEXT: vmovaps %ymm10, 32(%rcx)
2696 ; AVX2-FAST-NEXT: addq $1032, %rsp # imm = 0x408
2697 ; AVX2-FAST-NEXT: vzeroupper
2698 ; AVX2-FAST-NEXT: retq
2700 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride3_vf64:
2701 ; AVX2-FAST-PERLANE: # %bb.0:
2702 ; AVX2-FAST-PERLANE-NEXT: subq $1032, %rsp # imm = 0x408
2703 ; AVX2-FAST-PERLANE-NEXT: vmovaps 704(%rdi), %ymm3
2704 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2705 ; AVX2-FAST-PERLANE-NEXT: vmovaps 512(%rdi), %ymm4
2706 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2707 ; AVX2-FAST-PERLANE-NEXT: vmovaps 480(%rdi), %ymm6
2708 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2709 ; AVX2-FAST-PERLANE-NEXT: vmovaps 544(%rdi), %ymm5
2710 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2711 ; AVX2-FAST-PERLANE-NEXT: vmovaps 320(%rdi), %ymm7
2712 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2713 ; AVX2-FAST-PERLANE-NEXT: vmovaps 288(%rdi), %ymm10
2714 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2715 ; AVX2-FAST-PERLANE-NEXT: vmovaps 352(%rdi), %ymm9
2716 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2717 ; AVX2-FAST-PERLANE-NEXT: vmovaps 128(%rdi), %ymm13
2718 ; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm14
2719 ; AVX2-FAST-PERLANE-NEXT: vmovaps 160(%rdi), %ymm1
2720 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2721 ; AVX2-FAST-PERLANE-NEXT: vbroadcastsd {{.*#+}} ymm0 = [2,5,2,5,2,5,2,5]
2722 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm0, %ymm1
2723 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm14[0],ymm13[1],ymm14[2,3],ymm13[4],ymm14[5,6],ymm13[7]
2724 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2725 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2726 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm8 = [0,3,6,1,4,7,u,u]
2727 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm8, %ymm2
2728 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
2729 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2730 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm0, %ymm1
2731 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6],ymm7[7]
2732 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm8, %ymm2
2733 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
2734 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2735 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm0, %ymm1
2736 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7]
2737 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm8, %ymm2
2738 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
2739 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2740 ; AVX2-FAST-PERLANE-NEXT: vmovaps 672(%rdi), %ymm1
2741 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2742 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
2743 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm8, %ymm1
2744 ; AVX2-FAST-PERLANE-NEXT: vmovaps 736(%rdi), %ymm2
2745 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
2746 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm0, %ymm2
2747 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
2748 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2749 ; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm1
2750 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2751 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm3
2752 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
2753 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, %ymm5
2754 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm8, %ymm1
2755 ; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm2
2756 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2757 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm0, %ymm4
2758 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
2759 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2760 ; AVX2-FAST-PERLANE-NEXT: vmovaps 224(%rdi), %ymm7
2761 ; AVX2-FAST-PERLANE-NEXT: vmovaps 192(%rdi), %ymm15
2762 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0],ymm7[1],ymm15[2,3],ymm7[4],ymm15[5,6],ymm7[7]
2763 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2764 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2765 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm8, %ymm1
2766 ; AVX2-FAST-PERLANE-NEXT: vmovaps 256(%rdi), %ymm2
2767 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2768 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm0, %ymm6
2769 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
2770 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2771 ; AVX2-FAST-PERLANE-NEXT: vmovaps 416(%rdi), %ymm1
2772 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2773 ; AVX2-FAST-PERLANE-NEXT: vmovaps 384(%rdi), %ymm3
2774 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
2775 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, %ymm6
2776 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm8, %ymm1
2777 ; AVX2-FAST-PERLANE-NEXT: vmovaps 448(%rdi), %ymm2
2778 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2779 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm0, %ymm12
2780 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm12[6,7]
2781 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2782 ; AVX2-FAST-PERLANE-NEXT: vmovaps 608(%rdi), %ymm9
2783 ; AVX2-FAST-PERLANE-NEXT: vmovaps 576(%rdi), %ymm11
2784 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm11[0],ymm9[1],ymm11[2,3],ymm9[4],ymm11[5,6],ymm9[7]
2785 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2786 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm8, %ymm1
2787 ; AVX2-FAST-PERLANE-NEXT: vmovaps 640(%rdi), %ymm2
2788 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2789 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm0, %ymm0
2790 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2791 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2792 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm13[2],ymm14[3,4],ymm13[5],ymm14[6,7]
2793 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm1 = [1,4,7,2,5,u,u,u]
2794 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm1, %ymm2
2795 ; AVX2-FAST-PERLANE-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [0,0,3,6,0,0,3,6]
2796 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[0,1,0,1]
2797 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2798 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm10, %ymm0, %ymm14
2799 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2800 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2801 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
2802 ; AVX2-FAST-PERLANE-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm2 # 32-byte Folded Reload
2803 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1],ymm12[2],mem[3,4],ymm12[5],mem[6,7]
2804 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm1, %ymm2
2805 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
2806 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm13, %ymm0, %ymm14
2807 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2808 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2809 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2810 ; AVX2-FAST-PERLANE-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
2811 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
2812 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm1, %ymm2
2813 ; AVX2-FAST-PERLANE-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
2814 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2815 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2816 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2817 ; AVX2-FAST-PERLANE-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
2818 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
2819 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm1, %ymm2
2820 ; AVX2-FAST-PERLANE-NEXT: vpermps (%rsp), %ymm0, %ymm14 # 32-byte Folded Reload
2821 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2822 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2823 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, %ymm4
2824 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2825 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7]
2826 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm1, %ymm2
2827 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2828 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm0, %ymm14
2829 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2830 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2831 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1],ymm7[2],ymm15[3,4],ymm7[5],ymm15[6,7]
2832 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm1, %ymm2
2833 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2834 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm15, %ymm0, %ymm14
2835 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2836 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2837 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, %ymm7
2838 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2839 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
2840 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm1, %ymm2
2841 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2842 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm8, %ymm0, %ymm14
2843 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm14[5,6,7]
2844 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2845 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm11[0,1],ymm9[2],ymm11[3,4],ymm9[5],ymm11[6,7]
2846 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm1, %ymm1
2847 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
2848 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm9, %ymm0, %ymm0
2849 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
2850 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2851 ; AVX2-FAST-PERLANE-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
2852 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
2853 ; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm14 = [2,5,0,3,6,u,u,u]
2854 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm14, %ymm1
2855 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,1,0,3,4,5,4,7]
2856 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
2857 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3,4],ymm2[5,6,7]
2858 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
2859 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm1, %ymm14, %ymm1
2860 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm5[0,1,0,3,4,5,4,7]
2861 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
2862 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
2863 ; AVX2-FAST-PERLANE-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm2 # 32-byte Folded Reload
2864 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm12[0,1],mem[2],ymm12[3,4],mem[5],ymm12[6,7]
2865 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm14, %ymm2
2866 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm3 = ymm13[0,1,0,3,4,5,4,7]
2867 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,3]
2868 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3,4],ymm3[5,6,7]
2869 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2870 ; AVX2-FAST-PERLANE-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
2871 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
2872 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm2, %ymm14, %ymm2
2873 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm15[0,1,0,3,4,5,4,7]
2874 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
2875 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
2876 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2877 ; AVX2-FAST-PERLANE-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
2878 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = ymm4[0,1],mem[2],ymm4[3,4],mem[5],ymm4[6,7]
2879 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm14, %ymm4
2880 ; AVX2-FAST-PERLANE-NEXT: vpermilps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
2881 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,1,0,3,4,5,4,7]
2882 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
2883 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5,6,7]
2884 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7]
2885 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm5, %ymm14, %ymm5
2886 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm6 = ymm8[0,1,0,3,4,5,4,7]
2887 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,0,3]
2888 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
2889 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2890 ; AVX2-FAST-PERLANE-NEXT: vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
2891 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = ymm6[0,1],mem[2],ymm6[3,4],mem[5],ymm6[6,7]
2892 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm6, %ymm14, %ymm6
2893 ; AVX2-FAST-PERLANE-NEXT: vpermilps $196, (%rsp), %ymm7 # 32-byte Folded Reload
2894 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,1,0,3,4,5,4,7]
2895 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,1,0,3]
2896 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5,6,7]
2897 ; AVX2-FAST-PERLANE-NEXT: vblendps $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm7 # 32-byte Folded Reload
2898 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,1],ymm11[2],mem[3,4],ymm11[5],mem[6,7]
2899 ; AVX2-FAST-PERLANE-NEXT: vpermps %ymm7, %ymm14, %ymm7
2900 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm8 = ymm9[0,1,0,3,4,5,4,7]
2901 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,3]
2902 ; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5,6,7]
2903 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2904 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 192(%rsi)
2905 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2906 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 128(%rsi)
2907 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2908 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 64(%rsi)
2909 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2910 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, (%rsi)
2911 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2912 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 224(%rsi)
2913 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2914 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 160(%rsi)
2915 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2916 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 96(%rsi)
2917 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
2918 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 32(%rsi)
2919 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 192(%rdx)
2920 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2921 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%rdx)
2922 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2923 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rdx)
2924 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2925 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rdx)
2926 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2927 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rdx)
2928 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2929 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rdx)
2930 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2931 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 96(%rdx)
2932 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2933 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rdx)
2934 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, 192(%rcx)
2935 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 224(%rcx)
2936 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 128(%rcx)
2937 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 160(%rcx)
2938 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 64(%rcx)
2939 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 96(%rcx)
2940 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rcx)
2941 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm10, 32(%rcx)
2942 ; AVX2-FAST-PERLANE-NEXT: addq $1032, %rsp # imm = 0x408
2943 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
2944 ; AVX2-FAST-PERLANE-NEXT: retq
2946 ; AVX512-LABEL: load_i32_stride3_vf64:
2948 ; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm4
2949 ; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm5
2950 ; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm0
2951 ; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm6
2952 ; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm7
2953 ; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm1
2954 ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm8
2955 ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm9
2956 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm2
2957 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm10
2958 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm11
2959 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm3
2960 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = [0,3,6,9,12,15,18,21,24,27,30,u,u,u,u,u]
2961 ; AVX512-NEXT: vmovdqa64 %zmm3, %zmm13
2962 ; AVX512-NEXT: vpermt2d %zmm9, %zmm12, %zmm13
2963 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,2,3,4,5,6,7,8,9,10,17,20,23,26,29]
2964 ; AVX512-NEXT: vpermt2d %zmm8, %zmm14, %zmm13
2965 ; AVX512-NEXT: vmovdqa64 %zmm1, %zmm15
2966 ; AVX512-NEXT: vpermt2d %zmm7, %zmm12, %zmm15
2967 ; AVX512-NEXT: vpermt2d %zmm6, %zmm14, %zmm15
2968 ; AVX512-NEXT: vmovdqa64 %zmm0, %zmm16
2969 ; AVX512-NEXT: vpermt2d %zmm5, %zmm12, %zmm16
2970 ; AVX512-NEXT: vpermt2d %zmm4, %zmm14, %zmm16
2971 ; AVX512-NEXT: vpermi2d %zmm10, %zmm2, %zmm12
2972 ; AVX512-NEXT: vpermt2d %zmm11, %zmm14, %zmm12
2973 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [17,20,23,26,29,0,3,6,9,12,15,u,u,u,u,u]
2974 ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm17
2975 ; AVX512-NEXT: vpermt2d %zmm1, %zmm14, %zmm17
2976 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = [0,1,2,3,4,5,6,7,8,9,10,18,21,24,27,30]
2977 ; AVX512-NEXT: vpermt2d %zmm6, %zmm18, %zmm17
2978 ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm19
2979 ; AVX512-NEXT: vpermt2d %zmm3, %zmm14, %zmm19
2980 ; AVX512-NEXT: vpermt2d %zmm8, %zmm18, %zmm19
2981 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm20
2982 ; AVX512-NEXT: vpermt2d %zmm0, %zmm14, %zmm20
2983 ; AVX512-NEXT: vpermt2d %zmm4, %zmm18, %zmm20
2984 ; AVX512-NEXT: vpermi2d %zmm2, %zmm10, %zmm14
2985 ; AVX512-NEXT: vpermt2d %zmm11, %zmm18, %zmm14
2986 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = [2,5,8,11,14,17,20,23,26,29,u,u,u,u,u,u]
2987 ; AVX512-NEXT: vpermt2d %zmm9, %zmm18, %zmm3
2988 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,1,2,3,4,5,6,7,8,9,16,19,22,25,28,31]
2989 ; AVX512-NEXT: vpermt2d %zmm8, %zmm9, %zmm3
2990 ; AVX512-NEXT: vpermt2d %zmm5, %zmm18, %zmm0
2991 ; AVX512-NEXT: vpermt2d %zmm4, %zmm9, %zmm0
2992 ; AVX512-NEXT: vpermt2d %zmm7, %zmm18, %zmm1
2993 ; AVX512-NEXT: vpermt2d %zmm6, %zmm9, %zmm1
2994 ; AVX512-NEXT: vpermt2d %zmm10, %zmm18, %zmm2
2995 ; AVX512-NEXT: vpermt2d %zmm11, %zmm9, %zmm2
2996 ; AVX512-NEXT: vmovdqa64 %zmm16, 192(%rsi)
2997 ; AVX512-NEXT: vmovdqa64 %zmm15, 128(%rsi)
2998 ; AVX512-NEXT: vmovdqa64 %zmm13, 64(%rsi)
2999 ; AVX512-NEXT: vmovdqa64 %zmm12, (%rsi)
3000 ; AVX512-NEXT: vmovdqa64 %zmm20, 192(%rdx)
3001 ; AVX512-NEXT: vmovdqa64 %zmm14, (%rdx)
3002 ; AVX512-NEXT: vmovdqa64 %zmm19, 64(%rdx)
3003 ; AVX512-NEXT: vmovdqa64 %zmm17, 128(%rdx)
3004 ; AVX512-NEXT: vmovdqa64 %zmm1, 128(%rcx)
3005 ; AVX512-NEXT: vmovdqa64 %zmm0, 192(%rcx)
3006 ; AVX512-NEXT: vmovdqa64 %zmm2, (%rcx)
3007 ; AVX512-NEXT: vmovdqa64 %zmm3, 64(%rcx)
3008 ; AVX512-NEXT: vzeroupper
3010 %wide.vec = load <192 x i32>, ptr %in.vec, align 64
3011 %strided.vec0 = shufflevector <192 x i32> %wide.vec, <192 x i32> poison, <64 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93, i32 96, i32 99, i32 102, i32 105, i32 108, i32 111, i32 114, i32 117, i32 120, i32 123, i32 126, i32 129, i32 132, i32 135, i32 138, i32 141, i32 144, i32 147, i32 150, i32 153, i32 156, i32 159, i32 162, i32 165, i32 168, i32 171, i32 174, i32 177, i32 180, i32 183, i32 186, i32 189>
3012 %strided.vec1 = shufflevector <192 x i32> %wide.vec, <192 x i32> poison, <64 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94, i32 97, i32 100, i32 103, i32 106, i32 109, i32 112, i32 115, i32 118, i32 121, i32 124, i32 127, i32 130, i32 133, i32 136, i32 139, i32 142, i32 145, i32 148, i32 151, i32 154, i32 157, i32 160, i32 163, i32 166, i32 169, i32 172, i32 175, i32 178, i32 181, i32 184, i32 187, i32 190>
3013 %strided.vec2 = shufflevector <192 x i32> %wide.vec, <192 x i32> poison, <64 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95, i32 98, i32 101, i32 104, i32 107, i32 110, i32 113, i32 116, i32 119, i32 122, i32 125, i32 128, i32 131, i32 134, i32 137, i32 140, i32 143, i32 146, i32 149, i32 152, i32 155, i32 158, i32 161, i32 164, i32 167, i32 170, i32 173, i32 176, i32 179, i32 182, i32 185, i32 188, i32 191>
3014 store <64 x i32> %strided.vec0, ptr %out.vec0, align 64
3015 store <64 x i32> %strided.vec1, ptr %out.vec1, align 64
3016 store <64 x i32> %strided.vec2, ptr %out.vec2, align 64
3019 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
3024 ; AVX512BW-FAST: {{.*}}
3025 ; AVX512BW-ONLY: {{.*}}
3026 ; AVX512BW-ONLY-FAST: {{.*}}
3027 ; AVX512BW-ONLY-SLOW: {{.*}}
3028 ; AVX512BW-SLOW: {{.*}}
3029 ; AVX512DQ-FAST: {{.*}}
3030 ; AVX512DQ-ONLY: {{.*}}
3031 ; AVX512DQ-SLOW: {{.*}}
3032 ; AVX512DQBW-FAST: {{.*}}
3033 ; AVX512DQBW-ONLY: {{.*}}
3034 ; AVX512DQBW-SLOW: {{.*}}
3036 ; AVX512F-FAST: {{.*}}
3037 ; AVX512F-ONLY: {{.*}}
3038 ; AVX512F-ONLY-FAST: {{.*}}
3039 ; AVX512F-ONLY-SLOW: {{.*}}
3040 ; AVX512F-SLOW: {{.*}}
3043 ; FALLBACK10: {{.*}}
3044 ; FALLBACK11: {{.*}}
3045 ; FALLBACK12: {{.*}}