; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12

; These patterns are produced by LoopVectorizer for interleaved loads.
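; A minimal sketch of the kind of source loop that the LoopVectorizer turns
; into these patterns (the loop shape and names below are illustrative
; assumptions, not taken from a specific benchmark): five fields interleaved
; with stride 5 become one wide load plus five shufflevectors, as in the IR
; bodies of the functions below.
;
;   for (int i = 0; i < n; ++i) {
;     out0[i] = in[5 * i + 0];
;     out1[i] = in[5 * i + 1];
;     out2[i] = in[5 * i + 2];
;     out3[i] = in[5 * i + 3];
;     out4[i] = in[5 * i + 4];
;   }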

define void @load_i32_stride5_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
; SSE-LABEL: load_i32_stride5_vf2:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: movdqa 32(%rdi), %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[3,3,3,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: movq %xmm4, (%rsi)
; SSE-NEXT: movq %xmm5, (%rdx)
; SSE-NEXT: movq %xmm0, (%rcx)
; SSE-NEXT: movq %xmm6, (%r8)
; SSE-NEXT: movq %xmm1, (%r9)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i32_stride5_vf2:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,2,2,3]
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11]
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],mem[2,3],xmm1[4,5,6,7]
; AVX1-ONLY-NEXT: vmovq %xmm3, (%rsi)
; AVX1-ONLY-NEXT: vmovq %xmm4, (%rdx)
; AVX1-ONLY-NEXT: vpextrq $1, %xmm5, (%rcx)
; AVX1-ONLY-NEXT: vmovq %xmm0, (%r8)
; AVX1-ONLY-NEXT: vmovq %xmm1, (%r9)
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i32_stride5_vf2:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm4 = xmm0[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,2,2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11]
; AVX2-ONLY-NEXT: vpbroadcastd 16(%rdi), %ymm5
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3]
; AVX2-ONLY-NEXT: vmovq %xmm3, (%rsi)
; AVX2-ONLY-NEXT: vmovq %xmm4, (%rdx)
; AVX2-ONLY-NEXT: vpextrq $1, %xmm1, (%rcx)
; AVX2-ONLY-NEXT: vmovq %xmm0, (%r8)
; AVX2-ONLY-NEXT: vmovq %xmm2, (%r9)
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-SLOW-LABEL: load_i32_stride5_vf2:
; AVX512-SLOW: # %bb.0:
; AVX512-SLOW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512-SLOW-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX512-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX512-SLOW-NEXT: vpextrd $2, %xmm1, %eax
; AVX512-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
; AVX512-SLOW-NEXT: vpinsrd $1, %eax, %xmm4, %xmm4
; AVX512-SLOW-NEXT: vpbroadcastd 8(%rdi), %xmm5
; AVX512-SLOW-NEXT: vpextrd $3, %xmm1, %eax
; AVX512-SLOW-NEXT: vpinsrd $1, %eax, %xmm5, %xmm1
; AVX512-SLOW-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11]
; AVX512-SLOW-NEXT: vpbroadcastd 16(%rdi), %ymm5
; AVX512-SLOW-NEXT: vpblendd {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3]
; AVX512-SLOW-NEXT: vmovq %xmm3, (%rsi)
; AVX512-SLOW-NEXT: vmovq %xmm4, (%rdx)
; AVX512-SLOW-NEXT: vmovq %xmm1, (%rcx)
; AVX512-SLOW-NEXT: vmovq %xmm0, (%r8)
; AVX512-SLOW-NEXT: vmovq %xmm2, (%r9)
; AVX512-SLOW-NEXT: vzeroupper
; AVX512-SLOW-NEXT: retq
;
; AVX512-FAST-LABEL: load_i32_stride5_vf2:
; AVX512-FAST: # %bb.0:
; AVX512-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512-FAST-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX512-FAST-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX512-FAST-NEXT: vpbroadcastq {{.*#+}} xmm4 = [1,6,1,6]
; AVX512-FAST-NEXT: vpermi2d %xmm1, %xmm0, %xmm4
; AVX512-FAST-NEXT: vpbroadcastq {{.*#+}} xmm5 = [2,7,2,7]
; AVX512-FAST-NEXT: vpermi2d %xmm1, %xmm0, %xmm5
; AVX512-FAST-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11]
; AVX512-FAST-NEXT: vpbroadcastd 16(%rdi), %ymm1
; AVX512-FAST-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
; AVX512-FAST-NEXT: vmovq %xmm3, (%rsi)
; AVX512-FAST-NEXT: vmovq %xmm4, (%rdx)
; AVX512-FAST-NEXT: vmovq %xmm5, (%rcx)
; AVX512-FAST-NEXT: vmovq %xmm0, (%r8)
; AVX512-FAST-NEXT: vmovq %xmm1, (%r9)
; AVX512-FAST-NEXT: vzeroupper
; AVX512-FAST-NEXT: retq
  %wide.vec = load <10 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <10 x i32> %wide.vec, <10 x i32> poison, <2 x i32> <i32 0, i32 5>
  %strided.vec1 = shufflevector <10 x i32> %wide.vec, <10 x i32> poison, <2 x i32> <i32 1, i32 6>
  %strided.vec2 = shufflevector <10 x i32> %wide.vec, <10 x i32> poison, <2 x i32> <i32 2, i32 7>
  %strided.vec3 = shufflevector <10 x i32> %wide.vec, <10 x i32> poison, <2 x i32> <i32 3, i32 8>
  %strided.vec4 = shufflevector <10 x i32> %wide.vec, <10 x i32> poison, <2 x i32> <i32 4, i32 9>
  store <2 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <2 x i32> %strided.vec1, ptr %out.vec1, align 64
  store <2 x i32> %strided.vec2, ptr %out.vec2, align 64
  store <2 x i32> %strided.vec3, ptr %out.vec3, align 64
  store <2 x i32> %strided.vec4, ptr %out.vec4, align 64
  ret void
}

define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
; SSE-LABEL: load_i32_stride5_vf4:
; SSE: # %bb.0:
; SSE-NEXT: movdqa 64(%rdi), %xmm1
; SSE-NEXT: movdqa (%rdi), %xmm5
; SSE-NEXT: movdqa 16(%rdi), %xmm2
; SSE-NEXT: movdqa 32(%rdi), %xmm4
; SSE-NEXT: movdqa 48(%rdi), %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,2,2,2]
; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm6[0],xmm3[1]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[2,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm7[0],xmm8[1]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[3,3,3,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,0,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm5[0],xmm6[1]
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,2,2,2]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm1[2],xmm9[3],xmm1[3]
; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm2[0],xmm9[1]
; SSE-NEXT: movapd %xmm3, (%rsi)
; SSE-NEXT: movapd %xmm8, (%rdx)
; SSE-NEXT: movapd %xmm6, (%rcx)
; SSE-NEXT: movapd %xmm0, (%r8)
; SSE-NEXT: movapd %xmm9, (%r9)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i32_stride5_vf4:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3]
; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm3
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm4
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm4[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2],xmm5[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm5[1,2,3,3]
; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm6
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[0]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm7, %xmm7
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2],xmm7[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm7 = xmm7[1,0]
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm7 = xmm7[0,1,2],xmm6[1]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,0],mem[1,3]
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm6[2]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm6[3]
; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rsi)
; AVX1-ONLY-NEXT: vmovaps %xmm5, (%rdx)
; AVX1-ONLY-NEXT: vmovaps %xmm7, (%rcx)
; AVX1-ONLY-NEXT: vmovaps %xmm3, (%r8)
; AVX1-ONLY-NEXT: vmovaps %xmm0, (%r9)
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i32_stride5_vf4:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm2 = [0,5,2,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm2, %ymm2
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm3 = [1,6,3,u]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vpermd %ymm4, %ymm3, %ymm3
; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm4
; AVX2-ONLY-NEXT: vpbroadcastd %xmm4, %xmm5
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[3]
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm5 = [2,7,4,u]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm6, %ymm5, %ymm5
; AVX2-ONLY-NEXT: vpbroadcastd 68(%rdi), %xmm6
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
; AVX2-ONLY-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[2,2,2,2]
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm7 = ymm0[12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11],ymm0[28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,3,2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3]
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = [4,1,6,u]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm7, %ymm0
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[3]
; AVX2-ONLY-NEXT: vmovdqa %xmm2, (%rsi)
; AVX2-ONLY-NEXT: vmovdqa %xmm3, (%rdx)
; AVX2-ONLY-NEXT: vmovdqa %xmm5, (%rcx)
; AVX2-ONLY-NEXT: vmovdqa %xmm6, (%r8)
; AVX2-ONLY-NEXT: vmovdqa %xmm0, (%r9)
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i32_stride5_vf4:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [0,5,10,15]
; AVX512-NEXT: vmovdqa (%rdi), %ymm3
; AVX512-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm3
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [1,6,11,16]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,7,12,17]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [3,8,13,18]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [4,9,14,19]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512-NEXT: vmovdqa %xmm3, (%rsi)
; AVX512-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512-NEXT: vmovdqa %xmm5, (%r8)
; AVX512-NEXT: vmovdqa %xmm6, (%r9)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %wide.vec = load <20 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <20 x i32> %wide.vec, <20 x i32> poison, <4 x i32> <i32 0, i32 5, i32 10, i32 15>
  %strided.vec1 = shufflevector <20 x i32> %wide.vec, <20 x i32> poison, <4 x i32> <i32 1, i32 6, i32 11, i32 16>
  %strided.vec2 = shufflevector <20 x i32> %wide.vec, <20 x i32> poison, <4 x i32> <i32 2, i32 7, i32 12, i32 17>
  %strided.vec3 = shufflevector <20 x i32> %wide.vec, <20 x i32> poison, <4 x i32> <i32 3, i32 8, i32 13, i32 18>
  %strided.vec4 = shufflevector <20 x i32> %wide.vec, <20 x i32> poison, <4 x i32> <i32 4, i32 9, i32 14, i32 19>
  store <4 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <4 x i32> %strided.vec1, ptr %out.vec1, align 64
  store <4 x i32> %strided.vec2, ptr %out.vec2, align 64
  store <4 x i32> %strided.vec3, ptr %out.vec3, align 64
  store <4 x i32> %strided.vec4, ptr %out.vec4, align 64
  ret void
}

define void @load_i32_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
; SSE-LABEL: load_i32_stride5_vf8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa 144(%rdi), %xmm3
; SSE-NEXT: movdqa 64(%rdi), %xmm0
; SSE-NEXT: movdqa 128(%rdi), %xmm4
; SSE-NEXT: movdqa 112(%rdi), %xmm5
; SSE-NEXT: movdqa 80(%rdi), %xmm11
; SSE-NEXT: movdqa 96(%rdi), %xmm1
; SSE-NEXT: movdqa (%rdi), %xmm14
; SSE-NEXT: movdqa 16(%rdi), %xmm7
; SSE-NEXT: movdqa 32(%rdi), %xmm9
; SSE-NEXT: movdqa 48(%rdi), %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,1,1]
; SSE-NEXT: movdqa %xmm14, %xmm8
; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[2,2,2,2]
; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm8[0],xmm6[1]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm1[1,1,1,1]
; SSE-NEXT: movdqa %xmm11, %xmm10
; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm5[2,2,2,2]
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm4[2],xmm8[3],xmm4[3]
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm10[0],xmm8[1]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm7[2,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm14[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm0[0,0,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm9[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm12[0],xmm10[1]
; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm1[2,3,2,3]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm11[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm3[0,0,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm5[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm15[0],xmm12[1],xmm15[1]
; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm13[0],xmm12[1]
; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm7[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm14[3,3,3,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm14 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm2[0,0,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
; SSE-NEXT: movdqa %xmm0, %xmm9
; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm14[0],xmm13[1]
; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm1[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[3,3,3,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm14[2],xmm11[3],xmm14[3]
; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm4[0,0,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm3[0],xmm14[1],xmm3[1]
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm11[0],xmm14[1]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm2[2,2,2,2]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm15[0],xmm2[1]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm4[2,2,2,2]
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
; SSE-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm7[0],xmm11[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm5[2],xmm15[3],xmm5[3]
; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm1[0],xmm15[1]
; SSE-NEXT: movapd %xmm8, 16(%rsi)
; SSE-NEXT: movapd %xmm6, (%rsi)
; SSE-NEXT: movapd %xmm12, 16(%rdx)
; SSE-NEXT: movapd %xmm10, (%rdx)
; SSE-NEXT: movapd %xmm14, 16(%rcx)
; SSE-NEXT: movapd %xmm13, (%rcx)
; SSE-NEXT: movapd %xmm4, 16(%r8)
; SSE-NEXT: movapd %xmm2, (%r8)
; SSE-NEXT: movapd %xmm15, 16(%r9)
; SSE-NEXT: movapd %xmm11, (%r9)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i32_stride5_vf8:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm2
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm3
; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm4
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm5, %xmm6
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2],xmm6[3]
; AVX1-ONLY-NEXT: vinsertf128 $1, 128(%rdi), %ymm5, %ymm7
; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm6
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm8
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm8[0,1,2,3,4,5],ymm2[6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm1[4],ymm9[5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm7[3],ymm5[4,5,6],ymm7[7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm2[2,3,0,1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,1],ymm1[1,3],ymm9[6,5],ymm1[5,7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm9[2,0],ymm7[3,0],ymm9[6,4],ymm7[7,4]
; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm9
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm10
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm9[0,1],xmm10[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],mem[2],xmm11[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm11[1,2,3,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3,4,5,6,7]
; AVX1-ONLY-NEXT: vbroadcastss 144(%rdi), %ymm11
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm11[7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[3,0],ymm1[2,0],ymm8[7,4],ymm1[6,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0],ymm1[2,1],ymm8[6,4],ymm1[6,5]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm11, %xmm11
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],mem[2],xmm11[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm11 = xmm11[1,0]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1,2],ymm8[3,4,5,6,7]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm0[2,3,0,1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm0[1,0],ymm11[0,0],ymm0[5,4],ymm11[4,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,1,2,0,4,5,6,4]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm12[6,7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm2[0,0],ymm1[3,0],ymm2[4,4],ymm1[7,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,0],ymm1[2,2],ymm12[6,4],ymm1[6,6]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm9[3,0],mem[1,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm12[3,4,5,6,7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm0[2,0],ymm11[1,0],ymm0[6,4],ymm11[5,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm10[6,7]
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],mem[1],xmm2[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3,4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rsi)
; AVX1-ONLY-NEXT: vmovaps %ymm7, (%rdx)
; AVX1-ONLY-NEXT: vmovaps %ymm8, (%rcx)
; AVX1-ONLY-NEXT: vmovaps %ymm9, (%r8)
; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r9)
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i32_stride5_vf8:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm3
; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm4
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm5 = ymm4[0,1,0,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm3[4],ymm5[5,6,7]
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm6 = [0,5,2,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm7, %ymm6, %ymm6
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
; AVX2-ONLY-NEXT: vinserti128 $1, 128(%rdi), %ymm6, %ymm6
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5,6],ymm6[7]
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm6 = [1,6,3,u]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vpermd %ymm7, %ymm6, %ymm6
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [5,2,7,0,5,2,7,0]
; AVX2-ONLY-NEXT: # ymm8 = mem[0,1,0,1]
; AVX2-ONLY-NEXT: vpermd %ymm7, %ymm8, %ymm7
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4,5,6,7]
; AVX2-ONLY-NEXT: vpbroadcastd 144(%rdi), %ymm7
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = [2,7,4,u]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm8, %ymm7, %ymm7
; AVX2-ONLY-NEXT: vinserti128 $1, 96(%rdi), %ymm0, %ymm8
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm3[0,1,2,3,4,5,6],ymm8[7]
; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5,6,7]
; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm8 = [0,5,0,5,0,5,0,5]
; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm8, %ymm8
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm8 = [1,6,1,6,1,6,1,6]
; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm8, %ymm8
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[3,0,2,2,7,4,6,6]
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm10 = ymm1[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm1[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,3,2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm3[0,1],ymm4[0,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5],ymm3[6,7]
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm4 = [4,1,6,u]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm4, %ymm1
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5,6,7]
; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2,7,2,7,2,7,2,7]
; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm2, %ymm0
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa %ymm5, (%rsi)
; AVX2-ONLY-NEXT: vmovdqa %ymm6, (%rdx)
; AVX2-ONLY-NEXT: vmovdqa %ymm7, (%rcx)
; AVX2-ONLY-NEXT: vmovdqa %ymm8, (%r8)
; AVX2-ONLY-NEXT: vmovdqa %ymm0, (%r9)
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i32_stride5_vf8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [0,5,10,15,20,25,30,u]
; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm0
; AVX512-NEXT: vinserti128 $1, 128(%rdi), %ymm0, %ymm3
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3],ymm0[4,5,6],ymm3[7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [17,22,27,0,5,10,15,u]
; AVX512-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
; AVX512-NEXT: vpbroadcastd 144(%rdi), %ymm4
; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,7,12,17,22,27,u,u]
; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm4
; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,2,3,4,5,8,13]
; AVX512-NEXT: vmovdqa 128(%rdi), %ymm6
; AVX512-NEXT: vpermi2d %ymm6, %ymm4, %ymm5
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [3,8,13,18,23,28,u,u]
; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm4
; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,2,3,4,5,9,14]
; AVX512-NEXT: vpermi2d %ymm6, %ymm4, %ymm7
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [4,9,14,19,24,29,u,u]
; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm4
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,10,15]
; AVX512-NEXT: vpermi2d %ymm6, %ymm4, %ymm1
; AVX512-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512-NEXT: vmovdqa %ymm5, (%rcx)
; AVX512-NEXT: vmovdqa %ymm7, (%r8)
; AVX512-NEXT: vmovdqa %ymm1, (%r9)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %wide.vec = load <40 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <40 x i32> %wide.vec, <40 x i32> poison, <8 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35>
  %strided.vec1 = shufflevector <40 x i32> %wide.vec, <40 x i32> poison, <8 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36>
  %strided.vec2 = shufflevector <40 x i32> %wide.vec, <40 x i32> poison, <8 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37>
  %strided.vec3 = shufflevector <40 x i32> %wide.vec, <40 x i32> poison, <8 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38>
  %strided.vec4 = shufflevector <40 x i32> %wide.vec, <40 x i32> poison, <8 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39>
  store <8 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <8 x i32> %strided.vec1, ptr %out.vec1, align 64
  store <8 x i32> %strided.vec2, ptr %out.vec2, align 64
  store <8 x i32> %strided.vec3, ptr %out.vec3, align 64
  store <8 x i32> %strided.vec4, ptr %out.vec4, align 64
  ret void
}

define void @load_i32_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
; SSE-LABEL: load_i32_stride5_vf16:
; SSE: # %bb.0:
; SSE-NEXT: subq $312, %rsp # imm = 0x138
; SSE-NEXT: movdqa 288(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 272(%rdi), %xmm3
; SSE-NEXT: movdqa 240(%rdi), %xmm14
; SSE-NEXT: movdqa 256(%rdi), %xmm8
; SSE-NEXT: movdqa (%rdi), %xmm11
; SSE-NEXT: movdqa 16(%rdi), %xmm15
; SSE-NEXT: movdqa 32(%rdi), %xmm5
; SSE-NEXT: movdqa 48(%rdi), %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 208(%rdi), %xmm6
; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 192(%rdi), %xmm7
; SSE-NEXT: movdqa 160(%rdi), %xmm10
; SSE-NEXT: movdqa 176(%rdi), %xmm13
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
; SSE-NEXT: movdqa %xmm10, %xmm1
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,2,2]
; SSE-NEXT: movdqa %xmm7, %xmm9
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[1,1,1,1]
; SSE-NEXT: movdqa %xmm11, %xmm1
; SSE-NEXT: movdqa %xmm11, %xmm6
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,2,2]
; SSE-NEXT: movdqa %xmm5, %xmm7
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
; SSE-NEXT: movdqa %xmm14, %xmm1
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,2,2]
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 80(%rdi), %xmm4
; SSE-NEXT: movdqa 96(%rdi), %xmm11
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,1,1,1]
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movdqa 128(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 112(%rdi), %xmm12
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,2,2]
; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movdqa 224(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm6, %xmm9
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movdqa 64(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm14, %xmm3
; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,3,2,3]
; SSE-NEXT: movdqa %xmm8, (%rsp) # 16-byte Spill
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movdqa 304(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm4, %xmm7
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movdqa 144(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,2,3,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm1[2],xmm10[3],xmm1[3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[0,0,1,1]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm10[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm9, %xmm0
; SSE-NEXT: movdqa %xmm9, %xmm10
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,2,3,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,0,1,1]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,2,3,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm5[0,0,1,1]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm0[0],xmm12[1]
; SSE-NEXT: movdqa %xmm7, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,2,3,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm7[0,0,1,1]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,2,3,3]
; SSE-NEXT: movdqa %xmm14, %xmm8
; SSE-NEXT: movdqa %xmm14, %xmm15
; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[3,3,3,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm0[0],xmm8[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[3,3,3,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm0[0],xmm6[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[3,3,3,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
; SSE-NEXT: movdqa %xmm7, %xmm1
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[3,3,3,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,2,2,2]
; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm13[0],xmm0[1]
; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm10[1,1,1,1]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
; SSE-NEXT: # xmm13 = mem[2,2,2,2]
; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
; SSE-NEXT: # xmm13 = xmm13[2],mem[2],xmm13[3],mem[3]
; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm15[0],xmm13[1]
; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm4[1,1,1,1]
; SSE-NEXT: movdqa (%rsp), %xmm14 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm5[2,2,2,2]
; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
; SSE-NEXT: # xmm15 = xmm15[2],mem[2],xmm15[3],mem[3]
; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm14[0],xmm15[1]
; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm14[0],xmm11[1],xmm14[1]
; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm7[2,2,2,2]
; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: # xmm14 = xmm14[2],mem[2],xmm14[3],mem[3]
; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm11[0],xmm14[1]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 16(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: movaps %xmm11, 48(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: movaps %xmm11, (%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: movaps %xmm11, 32(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 16(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 48(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, (%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 32(%rdx)
; SSE-NEXT: movapd %xmm9, 16(%rcx)
; SSE-NEXT: movapd %xmm12, 48(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, (%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 32(%rcx)
; SSE-NEXT: movapd %xmm1, 16(%r8)
; SSE-NEXT: movapd %xmm2, 48(%r8)
; SSE-NEXT: movapd %xmm6, (%r8)
; SSE-NEXT: movapd %xmm8, 32(%r8)
; SSE-NEXT: movapd %xmm14, 16(%r9)
; SSE-NEXT: movapd %xmm15, 48(%r9)
; SSE-NEXT: movapd %xmm13, (%r9)
; SSE-NEXT: movapd %xmm0, 32(%r9)
; SSE-NEXT: addq $312, %rsp # imm = 0x138
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i32_stride5_vf16:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: subq $136, %rsp
; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm2
; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %ymm7
; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm11
; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm9
; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm5
; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm6
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm12
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm1[2,3],ymm12[4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm14
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX1-ONLY-NEXT: vinsertf128 $1, 128(%rdi), %ymm1, %ymm3
; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm0
; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3,4,5],ymm6[6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4],ymm4[5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5,6],ymm3[7]
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm11[2,3],ymm9[4,5],ymm11[6,7]
; AVX1-ONLY-NEXT: vmovaps %ymm9, %ymm13
; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2],xmm3[3]
; AVX1-ONLY-NEXT: vinsertf128 $1, 288(%rdi), %ymm1, %ymm4
; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm3
; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3,4,5],ymm7[6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm2[4],ymm8[5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3],ymm1[4,5,6],ymm4[7]
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm6[2,3],ymm5[4,5],ymm6[6,7]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm6[2,3,0,1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,1],ymm5[1,3],ymm4[6,5],ymm5[5,7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[2,0],ymm1[3,0],ymm4[6,4],ymm1[7,4]
; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm15
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm10
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm15[0,1],xmm10[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],mem[2],xmm4[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,2,3,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vbroadcastss 144(%rdi), %ymm4
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7]
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm7[2,3,0,1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm2[1,3],ymm1[6,5],ymm2[5,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1],ymm7[2,3],ymm2[4,5],ymm7[6,7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm4[3,0],ymm1[6,4],ymm4[7,4]
; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm9
; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm8
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm9[0,1],xmm8[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],mem[2],xmm4[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,2,3,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3,4,5,6,7]
; AVX1-ONLY-NEXT: vbroadcastss 304(%rdi), %ymm4
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7]
; AVX1-ONLY-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm5[2,0],ymm0[7,4],ymm5[6,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm5[2,1],ymm0[6,4],ymm5[6,5]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm14[4,5],ymm12[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm1
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm1[2,3,0,1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm1[1,0],ymm4[0,0],ymm1[5,4],ymm4[4,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm14[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm3[3,0],ymm2[2,0],ymm3[7,4],ymm2[6,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,1],ymm0[6,4],ymm2[6,5]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm13[0,1,2,3],ymm11[4,5],ymm13[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],mem[2],xmm3[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm3[0,1,2],ymm0[3,4,5,6,7]
; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %ymm0
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,0,1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm0[1,0],ymm3[0,0],ymm0[5,4],ymm3[4,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm13[6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm15[2,3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm6[0,0],ymm5[3,0],ymm6[4,4],ymm5[7,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm13[2,0],ymm5[2,2],ymm13[6,4],ymm5[6,6]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm10 = xmm10[3,0],mem[1,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm13[3,4,5,6,7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm1[2,0],ymm4[1,0],ymm1[6,4],ymm4[5,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm13[6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm7[0,0],ymm2[3,0],ymm7[4,4],ymm2[7,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,0],ymm2[2,2],ymm9[6,4],ymm2[6,6]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm8 = xmm8[3,0],mem[1,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4,5,6,7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm0[2,0],ymm3[1,0],ymm0[6,4],ymm3[5,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,1,2,0,4,5,6,4]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 16-byte Folded Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5],ymm5[6,7]
; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm6 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm6 = mem[0,1,2,3],ymm12[4,5],mem[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm6, %xmm6
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],mem[1],xmm6[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5,6],ymm1[7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 16-byte Folded Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm7[5],ymm2[6,7]
; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm4 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm4 = ymm11[0,1,2,3],mem[4,5],ymm11[6,7]
; AVX1-ONLY-NEXT: vextractf128 $1, %ymm4, %xmm4
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0],mem[1],xmm4[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2],ymm2[3,4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5,6],ymm0[7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rsi)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rsi)
; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rdx)
; AVX1-ONLY-NEXT: vmovaps %ymm14, 32(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rcx)
; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%r8)
; AVX1-ONLY-NEXT: vmovaps %ymm10, (%r8)
; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%r9)
; AVX1-ONLY-NEXT: vmovaps %ymm1, (%r9)
; AVX1-ONLY-NEXT: addq $136, %rsp
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i32_stride5_vf16:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: pushq %rax
; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm4
; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %ymm6
; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm8
; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm9
; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %ymm3
; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %ymm5
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = [0,5,2,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm10, %ymm7, %ymm10
; AVX2-ONLY-NEXT: vinserti128 $1, 288(%rdi), %ymm10, %ymm11
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm12 = ymm2[0,1,0,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm1[4],ymm12[5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm12[4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6],ymm11[7]
; AVX2-ONLY-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm10 = ymm9[0,1,0,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm8[4],ymm10[5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1],ymm6[2,3],ymm4[4,5],ymm6[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm11, %ymm7, %ymm7
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0,1,2,3],ymm10[4,5,6,7]
; AVX2-ONLY-NEXT: vinserti128 $1, 128(%rdi), %ymm7, %ymm7
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1,2],ymm7[3],ymm10[4,5,6],ymm7[7]
; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = [1,6,3,u]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1],ymm6[2,3],ymm4[4,5,6,7]
; AVX2-ONLY-NEXT: vpermd %ymm11, %ymm7, %ymm11
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm8[0,1],ymm9[2,3],ymm8[4,5],ymm9[6,7]
; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [5,2,7,0,5,2,7,0]
; AVX2-ONLY-NEXT: # ymm13 = mem[0,1,0,1]
; AVX2-ONLY-NEXT: vpermd %ymm12, %ymm13, %ymm12
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3,4,5,6,7]
; AVX2-ONLY-NEXT: vpbroadcastd 144(%rdi), %ymm12
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3,4,5,6],ymm12[7]
; AVX2-ONLY-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm5[0,1],ymm3[2,3],ymm5[4,5,6,7]
; AVX2-ONLY-NEXT: vpermd %ymm12, %ymm7, %ymm7
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm12, %ymm13, %ymm12
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm12[3,4,5,6,7]
; AVX2-ONLY-NEXT: vpbroadcastd 304(%rdi), %ymm12
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm12[7]
; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = [2,7,4,u]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm4[0,1,2,3],ymm6[4,5],ymm4[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm13, %ymm7, %ymm13
; AVX2-ONLY-NEXT: vinserti128 $1, 96(%rdi), %ymm0, %ymm14
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm14 = ymm8[0,1,2,3,4,5,6],ymm14[7]
; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm14 = [0,5,0,5,0,5,0,5]
; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm14, %ymm15
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm3[4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vpermd %ymm15, %ymm7, %ymm7
; AVX2-ONLY-NEXT: vinserti128 $1, 256(%rdi), %ymm0, %ymm15
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm15 = ymm1[0,1,2,3,4,5,6],ymm15[7]
; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm15[3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm15
; AVX2-ONLY-NEXT: vpermd %ymm15, %ymm14, %ymm14
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm14 = ymm7[0,1,2,3,4,5],ymm14[6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[3,0,2,2,7,4,6,6]
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm10 = ymm4[12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10,11],ymm4[28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,3,2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1,2],ymm7[3,4,5,6,7]
; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm10 = [1,6,1,6,1,6,1,6]
; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm10, %ymm11
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm11[6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm11 = ymm11[3,0,2,2,7,4,6,6]
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm12 = ymm5[12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10,11],ymm5[28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,3,2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3,4,5,6,7]
; AVX2-ONLY-NEXT: vpermd %ymm15, %ymm10, %ymm10
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3,4,5],ymm10[6,7]
; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm8 = ymm8[0,1],ymm9[0,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5],ymm6[6,7]
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm6 = [4,1,6,u]
; AVX2-ONLY-NEXT: vpermd %ymm4, %ymm6, %ymm4
1008 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm8[3,4,5,6,7]
1009 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm8 = [2,7,2,7,2,7,2,7]
1010 ; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm8, %ymm0
1011 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
1012 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5],ymm3[6,7]
1013 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm6, %ymm3
1014 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[0,1],ymm2[0,1]
1015 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
1016 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7]
1017 ; AVX2-ONLY-NEXT: vpermd %ymm15, %ymm8, %ymm2
1018 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
1019 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1020 ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%rsi)
1021 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1022 ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rsi)
1023 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1024 ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%rdx)
1025 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1026 ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rdx)
1027 ; AVX2-ONLY-NEXT: vmovdqa %ymm14, 32(%rcx)
1028 ; AVX2-ONLY-NEXT: vmovdqa %ymm13, (%rcx)
1029 ; AVX2-ONLY-NEXT: vmovdqa %ymm10, 32(%r8)
1030 ; AVX2-ONLY-NEXT: vmovdqa %ymm7, (%r8)
1031 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, 32(%r9)
1032 ; AVX2-ONLY-NEXT: vmovdqa %ymm0, (%r9)
1033 ; AVX2-ONLY-NEXT: popq %rax
1034 ; AVX2-ONLY-NEXT: vzeroupper
1035 ; AVX2-ONLY-NEXT: retq
1036 ;
1037 ; AVX512F-LABEL: load_i32_stride5_vf16:
1038 ; AVX512F: # %bb.0:
1039 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm0
1040 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm1
1041 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm2
1042 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm3
1043 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm4
1044 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [8,13,18,23,28,0,0,3,8,13,18,23,28,0,0,3]
1045 ; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
1046 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm3, %zmm5
1047 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [0,5,10,15,20,25,30,u]
1048 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm6
1049 ; AVX512F-NEXT: movw $8064, %ax # imm = 0x1F80
1050 ; AVX512F-NEXT: kmovw %eax, %k1
1051 ; AVX512F-NEXT: vmovdqa32 %zmm5, %zmm6 {%k1}
1052 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,5,6,7,8,9,10,11,12,17,22,27]
1053 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm6, %zmm5
1054 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [9,14,19,24,29,0,0,4,9,14,19,24,29,0,0,4]
1055 ; AVX512F-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
1056 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm3, %zmm6
1057 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [17,22,27,0,5,10,15,u]
1058 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm2, %zmm7
1059 ; AVX512F-NEXT: vmovdqa32 %zmm6, %zmm7 {%k1}
1060 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,6,7,8,9,10,11,12,18,23,28]
1061 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm7, %zmm6
1062 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [2,7,12,17,22,27,u,u]
1063 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm7
1064 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [10,15,20,25,30,0,0,5,10,15,20,25,30,0,0,5]
1065 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1066 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm3, %zmm8
1067 ; AVX512F-NEXT: movb $7, %al
1068 ; AVX512F-NEXT: kmovw %eax, %k1
1069 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1}
1070 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,6,7,8,9,10,11,12,19,24,29]
1071 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm8, %zmm7
1072 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [3,8,13,18,23,28,u,u]
1073 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm8
1074 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [27,0,5,10,15,0,17,22,27,0,5,10,15,0,17,22]
1075 ; AVX512F-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
1076 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm4, %zmm9
1077 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm9 {%k1}
1078 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,6,7,8,9,10,11,12,20,25,30]
1079 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm9, %zmm8
1080 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [12,17,22,27,0,0,2,7,12,17,22,27,0,0,2,7]
1081 ; AVX512F-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
1082 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm3, %zmm9
1083 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [4,9,14,19,24,29,u,u]
1084 ; AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
1085 ; AVX512F-NEXT: movb $56, %al
1086 ; AVX512F-NEXT: kmovw %eax, %k1
1087 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
1088 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,16,21,26,31]
1089 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm3, %zmm1
1090 ; AVX512F-NEXT: vmovdqa64 %zmm5, (%rsi)
1091 ; AVX512F-NEXT: vmovdqa64 %zmm6, (%rdx)
1092 ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rcx)
1093 ; AVX512F-NEXT: vmovdqa64 %zmm8, (%r8)
1094 ; AVX512F-NEXT: vmovdqa64 %zmm1, (%r9)
1095 ; AVX512F-NEXT: vzeroupper
1096 ; AVX512F-NEXT: retq
1097 ;
1098 ; AVX512BW-LABEL: load_i32_stride5_vf16:
1099 ; AVX512BW: # %bb.0:
1100 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm0
1101 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
1102 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm2
1103 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm3
1104 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm4
1105 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [8,13,18,23,28,0,0,3,8,13,18,23,28,0,0,3]
1106 ; AVX512BW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
1107 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm3, %zmm5
1108 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm6 = [0,5,10,15,20,25,30,u]
1109 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm6
1110 ; AVX512BW-NEXT: movw $8064, %ax # imm = 0x1F80
1111 ; AVX512BW-NEXT: kmovd %eax, %k1
1112 ; AVX512BW-NEXT: vmovdqa32 %zmm5, %zmm6 {%k1}
1113 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,5,6,7,8,9,10,11,12,17,22,27]
1114 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm6, %zmm5
1115 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [9,14,19,24,29,0,0,4,9,14,19,24,29,0,0,4]
1116 ; AVX512BW-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
1117 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm3, %zmm6
1118 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm7 = [17,22,27,0,5,10,15,u]
1119 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm2, %zmm7
1120 ; AVX512BW-NEXT: vmovdqa32 %zmm6, %zmm7 {%k1}
1121 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,6,7,8,9,10,11,12,18,23,28]
1122 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm7, %zmm6
1123 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm7 = [2,7,12,17,22,27,u,u]
1124 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm7
1125 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [10,15,20,25,30,0,0,5,10,15,20,25,30,0,0,5]
1126 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
1127 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm3, %zmm8
1128 ; AVX512BW-NEXT: movb $7, %al
1129 ; AVX512BW-NEXT: kmovd %eax, %k1
1130 ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1}
1131 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,6,7,8,9,10,11,12,19,24,29]
1132 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm8, %zmm7
1133 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = [3,8,13,18,23,28,u,u]
1134 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm8
1135 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [27,0,5,10,15,0,17,22,27,0,5,10,15,0,17,22]
1136 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
1137 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm4, %zmm9
1138 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm9 {%k1}
1139 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,6,7,8,9,10,11,12,20,25,30]
1140 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm9, %zmm8
1141 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [12,17,22,27,0,0,2,7,12,17,22,27,0,0,2,7]
1142 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
1143 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm3, %zmm9
1144 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = [4,9,14,19,24,29,u,u]
1145 ; AVX512BW-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
1146 ; AVX512BW-NEXT: movb $56, %al
1147 ; AVX512BW-NEXT: kmovd %eax, %k1
1148 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
1149 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,16,21,26,31]
1150 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm1
1151 ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rsi)
1152 ; AVX512BW-NEXT: vmovdqa64 %zmm6, (%rdx)
1153 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rcx)
1154 ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%r8)
1155 ; AVX512BW-NEXT: vmovdqa64 %zmm1, (%r9)
1156 ; AVX512BW-NEXT: vzeroupper
1157 ; AVX512BW-NEXT: retq
1158 %wide.vec = load <80 x i32>, ptr %in.vec, align 64
1159 %strided.vec0 = shufflevector <80 x i32> %wide.vec, <80 x i32> poison, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75>
1160 %strided.vec1 = shufflevector <80 x i32> %wide.vec, <80 x i32> poison, <16 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76>
1161 %strided.vec2 = shufflevector <80 x i32> %wide.vec, <80 x i32> poison, <16 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77>
1162 %strided.vec3 = shufflevector <80 x i32> %wide.vec, <80 x i32> poison, <16 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78>
1163 %strided.vec4 = shufflevector <80 x i32> %wide.vec, <80 x i32> poison, <16 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79>
1164 store <16 x i32> %strided.vec0, ptr %out.vec0, align 64
1165 store <16 x i32> %strided.vec1, ptr %out.vec1, align 64
1166 store <16 x i32> %strided.vec2, ptr %out.vec2, align 64
1167 store <16 x i32> %strided.vec3, ptr %out.vec3, align 64
1168 store <16 x i32> %strided.vec4, ptr %out.vec4, align 64
1169 ret void
1170 }
1172 define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
1173 ; SSE-LABEL: load_i32_stride5_vf32:
1174 ; SSE: # %bb.0:
1175 ; SSE-NEXT: subq $904, %rsp # imm = 0x388
1176 ; SSE-NEXT: movdqa (%rdi), %xmm11
1177 ; SSE-NEXT: movdqa 16(%rdi), %xmm5
1178 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1179 ; SSE-NEXT: movdqa 32(%rdi), %xmm9
1180 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1181 ; SSE-NEXT: movdqa 48(%rdi), %xmm8
1182 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1183 ; SSE-NEXT: movdqa 448(%rdi), %xmm3
1184 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1185 ; SSE-NEXT: movdqa 432(%rdi), %xmm4
1186 ; SSE-NEXT: movdqa 400(%rdi), %xmm10
1187 ; SSE-NEXT: movdqa 416(%rdi), %xmm14
1188 ; SSE-NEXT: movdqa 128(%rdi), %xmm6
1189 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1190 ; SSE-NEXT: movdqa 112(%rdi), %xmm7
1191 ; SSE-NEXT: movdqa 80(%rdi), %xmm12
1192 ; SSE-NEXT: movdqa 96(%rdi), %xmm1
1193 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
1194 ; SSE-NEXT: movdqa %xmm1, %xmm15
1195 ; SSE-NEXT: movdqa %xmm12, %xmm1
1196 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1197 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1198 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,2,2]
1199 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1200 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
1201 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1202 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1203 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[1,1,1,1]
1204 ; SSE-NEXT: movdqa %xmm10, %xmm1
1205 ; SSE-NEXT: movdqa %xmm10, %xmm6
1206 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1207 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,2,2]
1208 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1209 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
1210 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1211 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1212 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
1213 ; SSE-NEXT: movdqa %xmm11, %xmm1
1214 ; SSE-NEXT: movdqa %xmm11, %xmm5
1215 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1216 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,2,2]
1217 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
1218 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1219 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1220 ; SSE-NEXT: movdqa 320(%rdi), %xmm2
1221 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1222 ; SSE-NEXT: movdqa 336(%rdi), %xmm1
1223 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
1224 ; SSE-NEXT: movdqa %xmm1, %xmm9
1225 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1226 ; SSE-NEXT: movdqa %xmm2, %xmm1
1227 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1228 ; SSE-NEXT: movdqa 368(%rdi), %xmm2
1229 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1230 ; SSE-NEXT: movdqa 352(%rdi), %xmm11
1231 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,2,2]
1232 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1233 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1234 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1235 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1236 ; SSE-NEXT: movdqa 240(%rdi), %xmm1
1237 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1238 ; SSE-NEXT: movdqa 256(%rdi), %xmm0
1239 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1240 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1241 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1242 ; SSE-NEXT: movdqa 288(%rdi), %xmm2
1243 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1244 ; SSE-NEXT: movdqa 272(%rdi), %xmm0
1245 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1246 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
1247 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1248 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1249 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1250 ; SSE-NEXT: movdqa 560(%rdi), %xmm2
1251 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1252 ; SSE-NEXT: movdqa 576(%rdi), %xmm1
1253 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
1254 ; SSE-NEXT: movdqa %xmm1, %xmm8
1255 ; SSE-NEXT: movdqa %xmm2, %xmm1
1256 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1257 ; SSE-NEXT: movdqa 608(%rdi), %xmm2
1258 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1259 ; SSE-NEXT: movdqa 592(%rdi), %xmm13
1260 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,2,2]
1261 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1262 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1263 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1264 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1265 ; SSE-NEXT: movdqa 160(%rdi), %xmm1
1266 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1267 ; SSE-NEXT: movdqa 176(%rdi), %xmm0
1268 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1269 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1270 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1271 ; SSE-NEXT: movdqa 208(%rdi), %xmm2
1272 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1273 ; SSE-NEXT: movdqa 192(%rdi), %xmm0
1274 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1275 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
1276 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1277 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1278 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1279 ; SSE-NEXT: movdqa 480(%rdi), %xmm1
1280 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1281 ; SSE-NEXT: movdqa 496(%rdi), %xmm0
1282 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1283 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1284 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1285 ; SSE-NEXT: movdqa 528(%rdi), %xmm3
1286 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1287 ; SSE-NEXT: movdqa 512(%rdi), %xmm0
1288 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1289 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
1290 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
1291 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1292 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1293 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[1,1,1,1]
1294 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,3,2,3]
1295 ; SSE-NEXT: movdqa %xmm15, %xmm10
1296 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1297 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1298 ; SSE-NEXT: movdqa 144(%rdi), %xmm0
1299 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1300 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
1301 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
1302 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1303 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
1304 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1305 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1306 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
1307 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1308 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,3,2,3]
1309 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1310 ; SSE-NEXT: movdqa 464(%rdi), %xmm1
1311 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1312 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
1313 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,2,3]
1314 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
1315 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm0[0],xmm6[1]
1316 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1317 ; SSE-NEXT: movdqa %xmm5, %xmm12
1318 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1319 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
1320 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1321 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
1322 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1323 ; SSE-NEXT: movdqa 64(%rdi), %xmm1
1324 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1325 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
1326 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
1327 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
1328 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
1329 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
1330 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1331 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1332 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1333 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[2,3,2,3]
1334 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1335 ; SSE-NEXT: movdqa 384(%rdi), %xmm1
1336 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1337 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
1338 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[2,3,2,3]
1339 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
1340 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
1341 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1342 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1343 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
1344 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1345 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
1346 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1347 ; SSE-NEXT: movdqa 304(%rdi), %xmm2
1348 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,1,1]
1349 ; SSE-NEXT: movdqa %xmm2, %xmm14
1350 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1351 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1352 ; SSE-NEXT: # xmm7 = mem[2,3,2,3]
1353 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
1354 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm0[0],xmm7[1]
1355 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1356 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1357 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[1,1,1,1]
1358 ; SSE-NEXT: movdqa %xmm8, %xmm11
1359 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1360 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,3,2,3]
1361 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1362 ; SSE-NEXT: movdqa 624(%rdi), %xmm1
1363 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1364 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
1365 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[2,3,2,3]
1366 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1367 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
1368 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1369 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1370 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
1371 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
1372 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[2,3,2,3]
1373 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1374 ; SSE-NEXT: movdqa 224(%rdi), %xmm1
1375 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1376 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
1377 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1378 ; SSE-NEXT: # xmm2 = mem[2,3,2,3]
1379 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1380 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
1381 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1382 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1383 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
1384 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
1385 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,3,2,3]
1386 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1387 ; SSE-NEXT: movdqa 544(%rdi), %xmm1
1388 ; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
1389 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
1390 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1391 ; SSE-NEXT: # xmm2 = mem[2,3,2,3]
1392 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1393 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
1394 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1395 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1396 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[2,2,3,3]
1397 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1398 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
1399 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,0,1,1]
1400 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
1401 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
1402 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1403 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1404 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,2,3,3]
1405 ; SSE-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm1[2],xmm12[3],xmm1[3]
1406 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1407 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,0,1,1]
1408 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1409 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
1410 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm12[0],xmm1[1]
1411 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1412 ; SSE-NEXT: movdqa %xmm6, %xmm0
1413 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,2,3,3]
1414 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1415 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1416 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,0,1,1]
1417 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1]
1418 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1419 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1420 ; SSE-NEXT: movdqa %xmm3, %xmm0
1421 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[2,2,3,3]
1422 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1423 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
1424 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[0,0,1,1]
1425 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1426 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
1427 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1428 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1429 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1430 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1431 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
1432 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1433 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
1434 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[0,0,1,1]
1435 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1436 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
1437 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1438 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1439 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1440 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1441 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
1442 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1443 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
1444 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,0,1,1]
1445 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1446 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
1447 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1448 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1449 ; SSE-NEXT: movdqa %xmm15, %xmm0
1450 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,2,3,3]
1451 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1452 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1453 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[0,0,1,1]
1454 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1455 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
1456 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1457 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1458 ; SSE-NEXT: movdqa %xmm7, %xmm0
1459 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,2,3,3]
1460 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1461 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1462 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,0,1,1]
1463 ; SSE-NEXT: movdqa (%rsp), %xmm7 # 16-byte Reload
1464 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
1465 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1466 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1467 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
1468 ; SSE-NEXT: movdqa %xmm5, %xmm1
1469 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1470 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1471 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
1472 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
1473 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
1474 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1475 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1476 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,2,3,3]
1477 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
1478 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1479 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
1480 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1481 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
1482 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
1483 ; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1484 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
1485 ; SSE-NEXT: movdqa %xmm12, %xmm1
1486 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1487 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1488 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
1489 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1490 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
1491 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1492 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1493 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1494 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
1495 ; SSE-NEXT: movdqa %xmm6, %xmm13
1496 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
1497 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1498 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
1499 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1500 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
1501 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
1502 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,3,3]
1503 ; SSE-NEXT: movdqa %xmm9, %xmm12
1504 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
1505 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1506 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
1507 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1508 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
1509 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm0[0],xmm12[1]
1510 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1511 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
1512 ; SSE-NEXT: movdqa %xmm14, %xmm10
1513 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
1514 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1515 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
1516 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1517 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1518 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
1519 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,3,3]
1520 ; SSE-NEXT: movdqa %xmm11, %xmm9
1521 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
1522 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1523 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
1524 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
1525 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
1526 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
1527 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1528 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
1529 ; SSE-NEXT: movdqa %xmm15, %xmm7
1530 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
1531 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1532 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
1533 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1534 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
1535 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm0[0],xmm7[1]
1536 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
1537 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1538 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1539 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
1540 ; SSE-NEXT: # xmm8 = mem[2,2,2,2]
1541 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
1542 ; SSE-NEXT: # xmm8 = xmm8[2],mem[2],xmm8[3],mem[3]
1543 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm2[0],xmm8[1]
1544 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
1545 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1546 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1547 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1548 ; SSE-NEXT: # xmm6 = mem[2,2,2,2]
1549 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1550 ; SSE-NEXT: # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
1551 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
1552 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1553 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1554 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1555 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1556 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
1557 ; SSE-NEXT: # xmm5 = mem[2,2,2,2]
1558 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
1559 ; SSE-NEXT: # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3]
1560 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm2[0],xmm5[1]
1561 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1562 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
1563 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1564 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1565 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
1566 ; SSE-NEXT: # xmm4 = mem[2,2,2,2]
1567 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
1568 ; SSE-NEXT: # xmm4 = xmm4[2],mem[2],xmm4[3],mem[3]
1569 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
1570 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
1571 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1572 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
1573 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
1574 ; SSE-NEXT: # xmm3 = mem[2,2,2,2]
1575 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
1576 ; SSE-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
1577 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm15[0],xmm3[1]
1578 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
1579 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1580 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1581 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1582 ; SSE-NEXT: # xmm2 = mem[2,2,2,2]
1583 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1584 ; SSE-NEXT: # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
1585 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
1586 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[1,1,1,1]
1587 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1588 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
1589 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1590 ; SSE-NEXT: # xmm1 = mem[2,2,2,2]
1591 ; SSE-NEXT: punpckhdq (%rsp), %xmm1 # 16-byte Folded Reload
1592 ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
1593 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm15[0],xmm1[1]
1594 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,1,1,1]
1595 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1596 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
1597 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1598 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
1599 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1600 ; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
1601 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm15[0],xmm0[1]
1602 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1603 ; SSE-NEXT: movaps %xmm15, 96(%rsi)
1604 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1605 ; SSE-NEXT: movaps %xmm15, 32(%rsi)
1606 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1607 ; SSE-NEXT: movaps %xmm15, 112(%rsi)
1608 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1609 ; SSE-NEXT: movaps %xmm15, 48(%rsi)
1610 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1611 ; SSE-NEXT: movaps %xmm15, 64(%rsi)
1612 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1613 ; SSE-NEXT: movaps %xmm15, (%rsi)
1614 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1615 ; SSE-NEXT: movaps %xmm15, 80(%rsi)
1616 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1617 ; SSE-NEXT: movaps %xmm15, 16(%rsi)
1618 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1619 ; SSE-NEXT: movaps %xmm11, 96(%rdx)
1620 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1621 ; SSE-NEXT: movaps %xmm11, 32(%rdx)
1622 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1623 ; SSE-NEXT: movaps %xmm11, 112(%rdx)
1624 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1625 ; SSE-NEXT: movaps %xmm11, 48(%rdx)
1626 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1627 ; SSE-NEXT: movaps %xmm11, 64(%rdx)
1628 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1629 ; SSE-NEXT: movaps %xmm11, (%rdx)
1630 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1631 ; SSE-NEXT: movaps %xmm15, 80(%rdx)
1632 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1633 ; SSE-NEXT: movaps %xmm15, 16(%rdx)
1634 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1635 ; SSE-NEXT: movaps %xmm11, 96(%rcx)
1636 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1637 ; SSE-NEXT: movaps %xmm11, 112(%rcx)
1638 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1639 ; SSE-NEXT: movaps %xmm11, 64(%rcx)
1640 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1641 ; SSE-NEXT: movaps %xmm11, 80(%rcx)
1642 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1643 ; SSE-NEXT: movaps %xmm11, 32(%rcx)
1644 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1645 ; SSE-NEXT: movaps %xmm11, 48(%rcx)
1646 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1647 ; SSE-NEXT: movaps %xmm11, (%rcx)
1648 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
1649 ; SSE-NEXT: movaps %xmm11, 16(%rcx)
1650 ; SSE-NEXT: movapd %xmm7, 112(%r8)
1651 ; SSE-NEXT: movapd %xmm9, 96(%r8)
1652 ; SSE-NEXT: movapd %xmm10, 80(%r8)
1653 ; SSE-NEXT: movapd %xmm12, 64(%r8)
1654 ; SSE-NEXT: movapd %xmm13, 48(%r8)
1655 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1656 ; SSE-NEXT: movaps %xmm7, 32(%r8)
1657 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1658 ; SSE-NEXT: movaps %xmm7, 16(%r8)
1659 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1660 ; SSE-NEXT: movaps %xmm7, (%r8)
1661 ; SSE-NEXT: movapd %xmm0, 112(%r9)
1662 ; SSE-NEXT: movapd %xmm1, 96(%r9)
1663 ; SSE-NEXT: movapd %xmm2, 80(%r9)
1664 ; SSE-NEXT: movapd %xmm3, 64(%r9)
1665 ; SSE-NEXT: movapd %xmm4, 48(%r9)
1666 ; SSE-NEXT: movapd %xmm5, 32(%r9)
1667 ; SSE-NEXT: movapd %xmm6, 16(%r9)
1668 ; SSE-NEXT: movapd %xmm8, (%r9)
1669 ; SSE-NEXT: addq $904, %rsp # imm = 0x388
1670 ; SSE-NEXT: retq
1671 ;
1672 ; AVX1-ONLY-LABEL: load_i32_stride5_vf32:
1673 ; AVX1-ONLY: # %bb.0:
1674 ; AVX1-ONLY-NEXT: subq $952, %rsp # imm = 0x3B8
1675 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm3
1676 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1677 ; AVX1-ONLY-NEXT: vmovaps 544(%rdi), %ymm14
1678 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %ymm5
1679 ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %ymm4
1680 ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm6
1681 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1682 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm8
1683 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %ymm9
1684 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm1
1685 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm0
1686 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1687 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
1688 ; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm7
1689 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1690 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
1691 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
1692 ; AVX1-ONLY-NEXT: vinsertf128 $1, 288(%rdi), %ymm0, %ymm1
1693 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm2
1694 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1695 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm13
1696 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm13[0,1,2,3,4,5],ymm9[6,7]
1697 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4],ymm2[5,6,7]
1698 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
1699 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6],ymm1[7]
1700 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1701 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
1702 ; AVX1-ONLY-NEXT: vmovaps %ymm4, %ymm6
1703 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1704 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
1705 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
1706 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm1
1707 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1708 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm12
1709 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm12[0,1,2,3,4,5],ymm5[6,7]
1710 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4],ymm1[5,6,7]
1711 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
1712 ; AVX1-ONLY-NEXT: vinsertf128 $1, 608(%rdi), %ymm0, %ymm0
1713 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
1714 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1715 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm1
1716 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2,3],ymm1[4,5],ymm3[6,7]
1717 ; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm15
1718 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1719 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
1720 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
1721 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm11
1722 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm3
1723 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm1
1724 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1725 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1726 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1727 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
1728 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4],ymm1[5,6,7]
1729 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
1730 ; AVX1-ONLY-NEXT: vinsertf128 $1, 128(%rdi), %ymm0, %ymm0
1731 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
1732 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1733 ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %ymm1
1734 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1735 ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %ymm0
1736 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1737 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
1738 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
1739 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
1740 ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm10
1741 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm4
1742 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm1
1743 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1744 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1745 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1746 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
1747 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4],ymm1[5,6,7]
1748 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
1749 ; AVX1-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm0
1750 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
1751 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1752 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1753 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm9[2,3,0,1]
1754 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm8[1,3],ymm0[6,5],ymm8[5,7]
1755 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm9[2,3],ymm8[4,5],ymm9[6,7]
1756 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
1757 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm1
1758 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1759 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm0
1760 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1761 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3]
1762 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
1763 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
1764 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm2[3,4,5,6,7]
1765 ; AVX1-ONLY-NEXT: vbroadcastss 304(%rdi), %ymm1
1766 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
1767 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1768 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1769 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm5[2,3,0,1]
1770 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm14[1,3],ymm0[6,5],ymm14[5,7]
1771 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm14[0,1],ymm5[2,3],ymm14[4,5],ymm5[6,7]
1772 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
1773 ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %xmm2
1774 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1775 ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %xmm1
1776 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1777 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
1778 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
1779 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
1780 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
1781 ; AVX1-ONLY-NEXT: vbroadcastss 624(%rdi), %ymm1
1782 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
1783 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1784 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1785 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
1786 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm11[1,3],ymm0[6,5],ymm11[5,7]
1787 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm11[0,1],ymm3[2,3],ymm11[4,5],ymm3[6,7]
1788 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
1789 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm2
1790 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1791 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1
1792 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1793 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
1794 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
1795 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
1796 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
1797 ; AVX1-ONLY-NEXT: vbroadcastss 144(%rdi), %ymm1
1798 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
1799 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1800 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1801 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm4[2,3,0,1]
1802 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm10[1,3],ymm0[6,5],ymm10[5,7]
1803 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1],ymm4[2,3],ymm10[4,5],ymm4[6,7]
1804 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
1805 ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %xmm3
1806 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1807 ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %xmm1
1808 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1809 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
1810 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
1811 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
1812 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
1813 ; AVX1-ONLY-NEXT: vbroadcastss 464(%rdi), %ymm1
1814 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
1815 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1816 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm13[3,0],ymm8[2,0],ymm13[7,4],ymm8[6,4]
1817 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm8[2,1],ymm0[6,4],ymm8[6,5]
1818 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm1 # 32-byte Folded Reload
1819 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1,2,3],ymm7[4,5],mem[6,7]
1820 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
1821 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
1822 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
1823 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
1824 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %ymm5
1825 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm5[2,3,0,1]
1826 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[1,0],ymm3[0,0],ymm5[5,4],ymm3[4,4]
1827 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1828 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
1829 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1830 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1831 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm12[3,0],ymm14[2,0],ymm12[7,4],ymm14[6,4]
1832 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm14[2,1],ymm0[6,4],ymm14[6,5]
1833 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm1 # 32-byte Folded Reload
1834 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1,2,3],ymm6[4,5],mem[6,7]
1835 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
1836 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
1837 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
1838 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
1839 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %ymm7
1840 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm7[2,3,0,1]
1841 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm7[1,0],ymm2[0,0],ymm7[5,4],ymm2[4,4]
1842 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1843 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
1844 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1845 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1846 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1847 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm11[2,0],ymm0[7,4],ymm11[6,4]
1848 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm11[2,1],ymm0[6,4],ymm11[6,5]
1849 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
1850 ; AVX1-ONLY-NEXT: # ymm1 = ymm15[0,1,2,3],mem[4,5],ymm15[6,7]
1851 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
1852 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
1853 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
1854 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
1855 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm9
1856 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm9[2,3,0,1]
1857 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm9[1,0],ymm13[0,0],ymm9[5,4],ymm13[4,4]
1858 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,1,2,0,4,5,6,4]
1859 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm6[6,7]
1860 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1861 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1862 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm10[2,0],ymm0[7,4],ymm10[6,4]
1863 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm10[2,1],ymm0[6,4],ymm10[6,5]
1864 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1865 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
1866 ; AVX1-ONLY-NEXT: # ymm6 = mem[0,1,2,3],ymm1[4,5],mem[6,7]
1867 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm6, %xmm6
1868 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],mem[2],xmm6[3]
1869 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm6 = xmm6[1,0]
1870 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7]
1871 ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm6
1872 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm6[2,3,0,1]
1873 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm6[1,0],ymm12[0,0],ymm6[5,4],ymm12[4,4]
1874 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
1875 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
1876 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1877 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1878 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
1879 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
1880 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1881 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm4[0,0],ymm8[3,0],ymm4[4,4],ymm8[7,4]
1882 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm8[2,2],ymm15[6,4],ymm8[6,6]
1883 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
1884 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
1885 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm5[2,0],ymm3[1,0],ymm5[6,4],ymm3[5,4]
1886 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
1887 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
1888 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1889 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1890 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
1891 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
1892 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1893 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm3[0,0],ymm14[3,0],ymm3[4,4],ymm14[7,4]
1894 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm14[2,2],ymm15[6,4],ymm14[6,6]
1895 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
1896 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
1897 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm7[2,0],ymm2[1,0],ymm7[6,4],ymm2[5,4]
1898 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
1899 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
1900 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1901 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1902 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
1903 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
1904 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1905 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm2[0,0],ymm11[3,0],ymm2[4,4],ymm11[7,4]
1906 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm11[2,2],ymm15[6,4],ymm11[6,6]
1907 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
1908 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
1909 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm9[2,0],ymm13[1,0],ymm9[6,4],ymm13[5,4]
1910 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
1911 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
1912 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1913 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1914 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
1915 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
1916 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
1917 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm1[0,0],ymm10[3,0],ymm1[4,4],ymm10[7,4]
1918 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm10[2,2],ymm15[6,4],ymm10[6,6]
1919 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
1920 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
1921 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm6[2,0],ymm12[1,0],ymm6[6,4],ymm12[5,4]
1922 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
1923 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3,4,5],ymm15[6,7]
1924 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 16-byte Folded Reload
1925 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7]
1926 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1927 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm8 # 32-byte Folded Reload
1928 ; AVX1-ONLY-NEXT: # ymm8 = ymm4[0,1,2,3],mem[4,5],ymm4[6,7]
1929 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm8, %xmm8
1930 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],mem[1],xmm8[2,3]
1931 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3,4,5,6,7]
1932 ; AVX1-ONLY-NEXT: vblendps $127, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
1933 ; AVX1-ONLY-NEXT: # ymm5 = mem[0,1,2,3,4,5,6],ymm5[7]
1934 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
1935 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm5 # 16-byte Folded Reload
1936 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm3[5],ymm5[6,7]
1937 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1938 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm8 # 32-byte Folded Reload
1939 ; AVX1-ONLY-NEXT: # ymm8 = ymm3[0,1,2,3],mem[4,5],ymm3[6,7]
1940 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm8, %xmm8
1941 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],mem[1],xmm8[2,3]
1942 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7]
1943 ; AVX1-ONLY-NEXT: vblendps $127, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm3 # 32-byte Folded Reload
1944 ; AVX1-ONLY-NEXT: # ymm3 = mem[0,1,2,3,4,5,6],ymm7[7]
1945 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3,4,5],ymm3[6,7]
1946 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm5 # 16-byte Folded Reload
1947 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm2[5],ymm5[6,7]
1948 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
1949 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload
1950 ; AVX1-ONLY-NEXT: # ymm7 = mem[0,1,2,3],ymm2[4,5],mem[6,7]
1951 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm7, %xmm7
1952 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0],mem[1],xmm7[2,3]
1953 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3,4,5,6,7]
1954 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm13[0,1,2,3,4,5,6],ymm9[7]
1955 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm2[6,7]
1956 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm4 # 16-byte Folded Reload
1957 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm1[5],ymm4[6,7]
1958 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
1959 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
1960 ; AVX1-ONLY-NEXT: # ymm5 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
1961 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm5, %xmm5
1962 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0],mem[1],xmm5[2,3]
1963 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3,4,5,6,7]
1964 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm12[0,1,2,3,4,5,6],ymm6[7]
1965 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
1966 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1967 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rsi)
1968 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1969 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rsi)
1970 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1971 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rsi)
1972 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1973 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rsi)
1974 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1975 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rdx)
1976 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1977 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rdx)
1978 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1979 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rdx)
1980 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1981 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rdx)
1982 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1983 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rcx)
1984 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1985 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rcx)
1986 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1987 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rcx)
1988 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1989 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rcx)
1990 ; AVX1-ONLY-NEXT: vmovaps %ymm15, 64(%r8)
1991 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1992 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%r8)
1993 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1994 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%r8)
1995 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1996 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%r8)
1997 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%r9)
1998 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%r9)
1999 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%r9)
2000 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%r9)
2001 ; AVX1-ONLY-NEXT: addq $952, %rsp # imm = 0x3B8
2002 ; AVX1-ONLY-NEXT: vzeroupper
2003 ; AVX1-ONLY-NEXT: retq
2005 ; AVX2-ONLY-LABEL: load_i32_stride5_vf32:
2006 ; AVX2-ONLY: # %bb.0:
2007 ; AVX2-ONLY-NEXT: subq $1000, %rsp # imm = 0x3E8
2008 ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %ymm14
2009 ; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %ymm5
2010 ; AVX2-ONLY-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2011 ; AVX2-ONLY-NEXT: vmovdqa 352(%rdi), %ymm6
2012 ; AVX2-ONLY-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2013 ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %ymm7
2014 ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2015 ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %ymm8
2016 ; AVX2-ONLY-NEXT: vmovdqa 576(%rdi), %ymm9
2017 ; AVX2-ONLY-NEXT: vmovdqa 512(%rdi), %ymm10
2018 ; AVX2-ONLY-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2019 ; AVX2-ONLY-NEXT: vmovdqa 480(%rdi), %ymm15
2020 ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm13
2021 ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm11
2022 ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %ymm2
2023 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
2024 ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %ymm1
2025 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2026 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = [0,5,2,7]
2027 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
2028 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
2029 ; AVX2-ONLY-NEXT: vinserti128 $1, 288(%rdi), %ymm1, %ymm2
2030 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm11[0,1,0,3]
2031 ; AVX2-ONLY-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2032 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm13[4],ymm3[5,6,7]
2033 ; AVX2-ONLY-NEXT: vmovdqa %ymm13, %ymm12
2034 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
2035 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6],ymm2[7]
2036 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2037 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm10[2,3],ymm15[4,5],ymm10[6,7]
2038 ; AVX2-ONLY-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2039 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
2040 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm9[0,1,0,3]
2041 ; AVX2-ONLY-NEXT: vmovdqa %ymm9, %ymm13
2042 ; AVX2-ONLY-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2043 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4],ymm2[5,6,7]
2044 ; AVX2-ONLY-NEXT: vmovdqa %ymm8, %ymm10
2045 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
2046 ; AVX2-ONLY-NEXT: vinserti128 $1, 608(%rdi), %ymm1, %ymm1
2047 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
2048 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2049 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7]
2050 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
2051 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm5[0,1,0,3]
2052 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4],ymm2[5,6,7]
2053 ; AVX2-ONLY-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2054 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
2055 ; AVX2-ONLY-NEXT: vinserti128 $1, 448(%rdi), %ymm1, %ymm1
2056 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
2057 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2058 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm7
2059 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %ymm1
2060 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2061 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm1[2,3],ymm7[4,5],ymm1[6,7]
2062 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm0
2063 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm6
2064 ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm9
2065 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm9[0,1,0,3]
2066 ; AVX2-ONLY-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2067 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4],ymm1[5,6,7]
2068 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2069 ; AVX2-ONLY-NEXT: vinserti128 $1, 128(%rdi), %ymm0, %ymm0
2070 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
2071 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2072 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = [1,6,3,u]
2073 ; AVX2-ONLY-NEXT: vmovdqu (%rsp), %ymm8 # 32-byte Reload
2074 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2075 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm8[2,3],ymm4[4,5,6,7]
2076 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
2077 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1],ymm11[2,3],ymm12[4,5],ymm11[6,7]
2078 ; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [5,2,7,0,5,2,7,0]
2079 ; AVX2-ONLY-NEXT: # ymm3 = mem[0,1,0,1]
2080 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm3, %ymm2
2081 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
2082 ; AVX2-ONLY-NEXT: vpbroadcastd 304(%rdi), %ymm2
2083 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
2084 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2085 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2086 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm5[2,3],ymm15[4,5,6,7]
2087 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
2088 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0,1],ymm13[2,3],ymm10[4,5],ymm13[6,7]
2089 ; AVX2-ONLY-NEXT: vmovdqa %ymm10, %ymm11
2090 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm3, %ymm2
2091 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
2092 ; AVX2-ONLY-NEXT: vpbroadcastd 624(%rdi), %ymm2
2093 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
2094 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2095 ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2096 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2097 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm10[2,3],ymm7[4,5,6,7]
2098 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
2099 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm9[2,3],ymm6[4,5],ymm9[6,7]
2100 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm3, %ymm2
2101 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
2102 ; AVX2-ONLY-NEXT: vpbroadcastd 144(%rdi), %ymm2
2103 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
2104 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2105 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2106 ; AVX2-ONLY-NEXT: vpblendd $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
2107 ; AVX2-ONLY-NEXT: # ymm1 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
2108 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm0
2109 ; AVX2-ONLY-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
2110 ; AVX2-ONLY-NEXT: # ymm1 = ymm14[0,1],mem[2,3],ymm14[4,5],mem[6,7]
2111 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm3, %ymm1
2112 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
2113 ; AVX2-ONLY-NEXT: vpbroadcastd 464(%rdi), %ymm1
2114 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
2115 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2116 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm9 = [2,7,4,u]
2117 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm8[4,5],ymm4[6,7]
2118 ; AVX2-ONLY-NEXT: vmovdqa %ymm8, %ymm14
2119 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm9, %ymm1
2120 ; AVX2-ONLY-NEXT: vinserti128 $1, 256(%rdi), %ymm0, %ymm2
2121 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1,2,3,4,5,6],ymm2[7]
2122 ; AVX2-ONLY-NEXT: vmovdqa %ymm12, %ymm8
2123 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
2124 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
2125 ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm3
2126 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,5,0,5,0,5,0,5]
2127 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm0, %ymm2
2128 ; AVX2-ONLY-NEXT: vmovdqa %ymm3, %ymm12
2129 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2130 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
2131 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2132 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2133 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm5[4,5],ymm3[6,7]
2134 ; AVX2-ONLY-NEXT: vmovdqa %ymm5, %ymm13
2135 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm9, %ymm1
2136 ; AVX2-ONLY-NEXT: vinserti128 $1, 576(%rdi), %ymm0, %ymm2
2137 ; AVX2-ONLY-NEXT: vmovdqa %ymm11, %ymm4
2138 ; AVX2-ONLY-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2139 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3,4,5,6],ymm2[7]
2140 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
2141 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
2142 ; AVX2-ONLY-NEXT: vmovdqa 608(%rdi), %ymm2
2143 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2144 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
2145 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
2146 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2147 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm10[4,5],ymm7[6,7]
2148 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm9, %ymm1
2149 ; AVX2-ONLY-NEXT: vinserti128 $1, 96(%rdi), %ymm0, %ymm2
2150 ; AVX2-ONLY-NEXT: vmovdqa %ymm6, %ymm7
2151 ; AVX2-ONLY-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2152 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5,6],ymm2[7]
2153 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
2154 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
2155 ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm6
2156 ; AVX2-ONLY-NEXT: vpermd %ymm6, %ymm0, %ymm10
2157 ; AVX2-ONLY-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2158 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm10[6,7]
2159 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2160 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2161 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm15[4,5],ymm2[6,7]
2162 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm9, %ymm1
2163 ; AVX2-ONLY-NEXT: vinserti128 $1, 416(%rdi), %ymm0, %ymm5
2164 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
2165 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3,4,5,6],ymm5[7]
2166 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,3,0,1,6,7,4,5]
2167 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0,1,2],ymm5[3,4,5,6,7]
2168 ; AVX2-ONLY-NEXT: vmovdqa 448(%rdi), %ymm1
2169 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2170 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm0
2171 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6,7]
2172 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2173 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2174 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm1[4,5],ymm8[6,7]
2175 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,0,2,2,7,4,6,6]
2176 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2177 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm5 = ymm11[12,13,14,15],ymm14[0,1,2,3,4,5,6,7,8,9,10,11],ymm11[28,29,30,31],ymm14[16,17,18,19,20,21,22,23,24,25,26,27]
2178 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,3,2,3]
2179 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3,4,5,6,7]
2180 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm5 = [1,6,1,6,1,6,1,6]
2181 ; AVX2-ONLY-NEXT: vpermd %ymm12, %ymm5, %ymm10
2182 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm10[6,7]
2183 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2184 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
2185 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm12[4,5],ymm4[6,7]
2186 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,0,2,2,7,4,6,6]
2187 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm10 = ymm3[12,13,14,15],ymm13[0,1,2,3,4,5,6,7,8,9,10,11],ymm3[28,29,30,31],ymm13[16,17,18,19,20,21,22,23,24,25,26,27]
2188 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,3,2,3]
2189 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7]
2190 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2191 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm5, %ymm10
2192 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm10[6,7]
2193 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2194 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
2195 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm13[4,5],ymm7[6,7]
2196 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,0,2,2,7,4,6,6]
2197 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2198 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2199 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm10 = ymm14[12,13,14,15],ymm15[0,1,2,3,4,5,6,7,8,9,10,11],ymm14[28,29,30,31],ymm15[16,17,18,19,20,21,22,23,24,25,26,27]
2200 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,3,2,3]
2201 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7]
2202 ; AVX2-ONLY-NEXT: vpermd %ymm6, %ymm5, %ymm10
2203 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm10[6,7]
2204 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2205 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm0 # 32-byte Folded Reload
2206 ; AVX2-ONLY-NEXT: # ymm0 = ymm9[0,1,2,3],mem[4,5],ymm9[6,7]
2207 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,0,2,2,7,4,6,6]
2208 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
2209 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm10 = ymm2[12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10,11],ymm2[28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26,27]
2210 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,3,2,3]
2211 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7]
2212 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
2213 ; AVX2-ONLY-NEXT: vpermd %ymm9, %ymm5, %ymm5
2214 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
2215 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2216 ; AVX2-ONLY-NEXT: vpblendd $207, (%rsp), %ymm11, %ymm0 # 32-byte Folded Reload
2217 ; AVX2-ONLY-NEXT: # ymm0 = mem[0,1,2,3],ymm11[4,5],mem[6,7]
2218 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm8[0,1],ymm1[0,1]
2219 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm1[5],ymm5[6,7]
2220 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm10 = [4,1,6,u]
2221 ; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm10, %ymm0
2222 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5,6,7]
2223 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm5 = [2,7,2,7,2,7,2,7]
2224 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm4 # 32-byte Folded Reload
2225 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7]
2226 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2227 ; AVX2-ONLY-NEXT: vpblendd $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
2228 ; AVX2-ONLY-NEXT: # ymm4 = mem[0,1,2,3],ymm1[4,5],mem[6,7]
2229 ; AVX2-ONLY-NEXT: vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm11 # 32-byte Folded Reload
2230 ; AVX2-ONLY-NEXT: # ymm11 = mem[0,1],ymm12[0,1]
2231 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm12[5],ymm11[6,7]
2232 ; AVX2-ONLY-NEXT: vpermd %ymm4, %ymm10, %ymm4
2233 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm11[3,4,5,6,7]
2234 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm5, %ymm3
2235 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
2236 ; AVX2-ONLY-NEXT: vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm4 # 32-byte Folded Reload
2237 ; AVX2-ONLY-NEXT: # ymm4 = mem[0,1],ymm13[0,1]
2238 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm13[5],ymm4[6,7]
2239 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm14[4,5],ymm15[6,7]
2240 ; AVX2-ONLY-NEXT: vpermd %ymm6, %ymm10, %ymm6
2241 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2],ymm4[3,4,5,6,7]
2242 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm2 # 32-byte Folded Reload
2243 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
2244 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm4 # 32-byte Folded Reload
2245 ; AVX2-ONLY-NEXT: # ymm4 = ymm7[0,1,2,3],mem[4,5],ymm7[6,7]
2246 ; AVX2-ONLY-NEXT: vpermd %ymm4, %ymm10, %ymm4
2247 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
2248 ; AVX2-ONLY-NEXT: vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm6 # 32-byte Folded Reload
2249 ; AVX2-ONLY-NEXT: # ymm6 = mem[0,1],ymm7[0,1]
2250 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
2251 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5,6,7]
2252 ; AVX2-ONLY-NEXT: vpermd %ymm9, %ymm5, %ymm1
2253 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
2254 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2255 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 64(%rsi)
2256 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2257 ; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rsi)
2258 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2259 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 96(%rsi)
2260 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2261 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 32(%rsi)
2262 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2263 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 64(%rdx)
2264 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2265 ; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rdx)
2266 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2267 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 96(%rdx)
2268 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2269 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 32(%rdx)
2270 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2271 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 64(%rcx)
2272 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2273 ; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rcx)
2274 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2275 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 96(%rcx)
2276 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2277 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 32(%rcx)
2278 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2279 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 64(%r8)
2280 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2281 ; AVX2-ONLY-NEXT: vmovaps %ymm4, (%r8)
2282 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2283 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 96(%r8)
2284 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2285 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 32(%r8)
2286 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, 64(%r9)
2287 ; AVX2-ONLY-NEXT: vmovdqa %ymm2, (%r9)
2288 ; AVX2-ONLY-NEXT: vmovdqa %ymm3, 96(%r9)
2289 ; AVX2-ONLY-NEXT: vmovdqa %ymm0, 32(%r9)
2290 ; AVX2-ONLY-NEXT: addq $1000, %rsp # imm = 0x3E8
2291 ; AVX2-ONLY-NEXT: vzeroupper
2292 ; AVX2-ONLY-NEXT: retq
2294 ; AVX512F-LABEL: load_i32_stride5_vf32:
2295 ; AVX512F: # %bb.0:
2296 ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm2
2297 ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm3
2298 ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm0
2299 ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm5
2300 ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm4
2301 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm7
2302 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm1
2303 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm10
2304 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm9
2305 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm11
2306 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [8,13,18,23,28,0,0,3,8,13,18,23,28,0,0,3]
2307 ; AVX512F-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
2308 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm13
2309 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm12, %zmm13
2310 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [0,5,10,15,20,25,30,u]
2311 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm8
2312 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm6, %zmm8
2313 ; AVX512F-NEXT: movw $8064, %ax # imm = 0x1F80
2314 ; AVX512F-NEXT: kmovw %eax, %k1
2315 ; AVX512F-NEXT: vmovdqa32 %zmm13, %zmm8 {%k1}
2316 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = [0,1,2,3,4,5,6,7,8,9,10,11,12,17,22,27]
2317 ; AVX512F-NEXT: vpermt2d %zmm7, %zmm13, %zmm8
2318 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm4, %zmm12
2319 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm0, %zmm6
2320 ; AVX512F-NEXT: vmovdqa32 %zmm12, %zmm6 {%k1}
2321 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm13, %zmm6
2322 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [9,14,19,24,29,0,0,4,9,14,19,24,29,0,0,4]
2323 ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
2324 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm15
2325 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm14, %zmm15
2326 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm12 = [17,22,27,0,5,10,15,u]
2327 ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm13
2328 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm12, %zmm13
2329 ; AVX512F-NEXT: vmovdqa32 %zmm15, %zmm13 {%k1}
2330 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,6,7,8,9,10,11,12,18,23,28]
2331 ; AVX512F-NEXT: vpermt2d %zmm7, %zmm15, %zmm13
2332 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm4, %zmm14
2333 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm3, %zmm12
2334 ; AVX512F-NEXT: vmovdqa32 %zmm14, %zmm12 {%k1}
2335 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm15, %zmm12
2336 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm16 = [2,7,12,17,22,27,u,u]
2337 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm17
2338 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm16, %zmm17
2339 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [10,15,20,25,30,0,0,5,10,15,20,25,30,0,0,5]
2340 ; AVX512F-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
2341 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm14
2342 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm15, %zmm14
2343 ; AVX512F-NEXT: movb $7, %al
2344 ; AVX512F-NEXT: kmovw %eax, %k1
2345 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm14 {%k1}
2346 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,5,6,7,8,9,10,11,12,19,24,29]
2347 ; AVX512F-NEXT: vpermt2d %zmm7, %zmm17, %zmm14
2348 ; AVX512F-NEXT: vpermi2d %zmm5, %zmm4, %zmm15
2349 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm0, %zmm16
2350 ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm15 {%k1}
2351 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm17, %zmm15
2352 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm16 = [3,8,13,18,23,28,u,u]
2353 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm17
2354 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm16, %zmm17
2355 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm18 = [27,0,5,10,15,0,17,22,27,0,5,10,15,0,17,22]
2356 ; AVX512F-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3]
2357 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm19
2358 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm18, %zmm19
2359 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm19 {%k1}
2360 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,5,6,7,8,9,10,11,12,20,25,30]
2361 ; AVX512F-NEXT: vpermt2d %zmm7, %zmm17, %zmm19
2362 ; AVX512F-NEXT: vpermi2d %zmm4, %zmm5, %zmm18
2363 ; AVX512F-NEXT: vpermi2d %zmm3, %zmm0, %zmm16
2364 ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm18 {%k1}
2365 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm17, %zmm18
2366 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [12,17,22,27,0,0,2,7,12,17,22,27,0,0,2,7]
2367 ; AVX512F-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
2368 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm16, %zmm9
2369 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm11 = [4,9,14,19,24,29,u,u]
2370 ; AVX512F-NEXT: vpermt2d %zmm10, %zmm11, %zmm1
2371 ; AVX512F-NEXT: movb $56, %al
2372 ; AVX512F-NEXT: kmovw %eax, %k1
2373 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm1 {%k1}
2374 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,1,2,3,4,5,6,7,8,9,10,11,16,21,26,31]
2375 ; AVX512F-NEXT: vpermt2d %zmm7, %zmm9, %zmm1
2376 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm16, %zmm4
2377 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm11, %zmm0
2378 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
2379 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm9, %zmm0
2380 ; AVX512F-NEXT: vmovdqa64 %zmm6, 64(%rsi)
2381 ; AVX512F-NEXT: vmovdqa64 %zmm8, (%rsi)
2382 ; AVX512F-NEXT: vmovdqa64 %zmm12, 64(%rdx)
2383 ; AVX512F-NEXT: vmovdqa64 %zmm13, (%rdx)
2384 ; AVX512F-NEXT: vmovdqa64 %zmm15, 64(%rcx)
2385 ; AVX512F-NEXT: vmovdqa64 %zmm14, (%rcx)
2386 ; AVX512F-NEXT: vmovdqa64 %zmm18, 64(%r8)
2387 ; AVX512F-NEXT: vmovdqa64 %zmm19, (%r8)
2388 ; AVX512F-NEXT: vmovdqa64 %zmm0, 64(%r9)
2389 ; AVX512F-NEXT: vmovdqa64 %zmm1, (%r9)
2390 ; AVX512F-NEXT: vzeroupper
2391 ; AVX512F-NEXT: retq
2393 ; AVX512BW-LABEL: load_i32_stride5_vf32:
2394 ; AVX512BW: # %bb.0:
2395 ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm2
2396 ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm3
2397 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm0
2398 ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm5
2399 ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm4
2400 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm7
2401 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
2402 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm10
2403 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm9
2404 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm11
2405 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [8,13,18,23,28,0,0,3,8,13,18,23,28,0,0,3]
2406 ; AVX512BW-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
2407 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm13
2408 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm12, %zmm13
2409 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm6 = [0,5,10,15,20,25,30,u]
2410 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm8
2411 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm6, %zmm8
2412 ; AVX512BW-NEXT: movw $8064, %ax # imm = 0x1F80
2413 ; AVX512BW-NEXT: kmovd %eax, %k1
2414 ; AVX512BW-NEXT: vmovdqa32 %zmm13, %zmm8 {%k1}
2415 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = [0,1,2,3,4,5,6,7,8,9,10,11,12,17,22,27]
2416 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm13, %zmm8
2417 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm12
2418 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm0, %zmm6
2419 ; AVX512BW-NEXT: vmovdqa32 %zmm12, %zmm6 {%k1}
2420 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm13, %zmm6
2421 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [9,14,19,24,29,0,0,4,9,14,19,24,29,0,0,4]
2422 ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
2423 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm15
2424 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm14, %zmm15
2425 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm12 = [17,22,27,0,5,10,15,u]
2426 ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm13
2427 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm12, %zmm13
2428 ; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm13 {%k1}
2429 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,6,7,8,9,10,11,12,18,23,28]
2430 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm15, %zmm13
2431 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm14
2432 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm12
2433 ; AVX512BW-NEXT: vmovdqa32 %zmm14, %zmm12 {%k1}
2434 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm15, %zmm12
2435 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm16 = [2,7,12,17,22,27,u,u]
2436 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm17
2437 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm16, %zmm17
2438 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [10,15,20,25,30,0,0,5,10,15,20,25,30,0,0,5]
2439 ; AVX512BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
2440 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm14
2441 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm15, %zmm14
2442 ; AVX512BW-NEXT: movb $7, %al
2443 ; AVX512BW-NEXT: kmovd %eax, %k1
2444 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm14 {%k1}
2445 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,5,6,7,8,9,10,11,12,19,24,29]
2446 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm17, %zmm14
2447 ; AVX512BW-NEXT: vpermi2d %zmm5, %zmm4, %zmm15
2448 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm0, %zmm16
2449 ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm15 {%k1}
2450 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm17, %zmm15
2451 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm16 = [3,8,13,18,23,28,u,u]
2452 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm17
2453 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm16, %zmm17
2454 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm18 = [27,0,5,10,15,0,17,22,27,0,5,10,15,0,17,22]
2455 ; AVX512BW-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3]
2456 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm19
2457 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm18, %zmm19
2458 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm19 {%k1}
2459 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,5,6,7,8,9,10,11,12,20,25,30]
2460 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm17, %zmm19
2461 ; AVX512BW-NEXT: vpermi2d %zmm4, %zmm5, %zmm18
2462 ; AVX512BW-NEXT: vpermi2d %zmm3, %zmm0, %zmm16
2463 ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm18 {%k1}
2464 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm17, %zmm18
2465 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [12,17,22,27,0,0,2,7,12,17,22,27,0,0,2,7]
2466 ; AVX512BW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
2467 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm16, %zmm9
2468 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm11 = [4,9,14,19,24,29,u,u]
2469 ; AVX512BW-NEXT: vpermt2d %zmm10, %zmm11, %zmm1
2470 ; AVX512BW-NEXT: movb $56, %al
2471 ; AVX512BW-NEXT: kmovd %eax, %k1
2472 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm1 {%k1}
2473 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,1,2,3,4,5,6,7,8,9,10,11,16,21,26,31]
2474 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm9, %zmm1
2475 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm16, %zmm4
2476 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm11, %zmm0
2477 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
2478 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm9, %zmm0
2479 ; AVX512BW-NEXT: vmovdqa64 %zmm6, 64(%rsi)
2480 ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%rsi)
2481 ; AVX512BW-NEXT: vmovdqa64 %zmm12, 64(%rdx)
2482 ; AVX512BW-NEXT: vmovdqa64 %zmm13, (%rdx)
2483 ; AVX512BW-NEXT: vmovdqa64 %zmm15, 64(%rcx)
2484 ; AVX512BW-NEXT: vmovdqa64 %zmm14, (%rcx)
2485 ; AVX512BW-NEXT: vmovdqa64 %zmm18, 64(%r8)
2486 ; AVX512BW-NEXT: vmovdqa64 %zmm19, (%r8)
2487 ; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%r9)
2488 ; AVX512BW-NEXT: vmovdqa64 %zmm1, (%r9)
2489 ; AVX512BW-NEXT: vzeroupper
2490 ; AVX512BW-NEXT: retq
2491 %wide.vec = load <160 x i32>, ptr %in.vec, align 64
2492 %strided.vec0 = shufflevector <160 x i32> %wide.vec, <160 x i32> poison, <32 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75, i32 80, i32 85, i32 90, i32 95, i32 100, i32 105, i32 110, i32 115, i32 120, i32 125, i32 130, i32 135, i32 140, i32 145, i32 150, i32 155>
2493 %strided.vec1 = shufflevector <160 x i32> %wide.vec, <160 x i32> poison, <32 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76, i32 81, i32 86, i32 91, i32 96, i32 101, i32 106, i32 111, i32 116, i32 121, i32 126, i32 131, i32 136, i32 141, i32 146, i32 151, i32 156>
2494 %strided.vec2 = shufflevector <160 x i32> %wide.vec, <160 x i32> poison, <32 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77, i32 82, i32 87, i32 92, i32 97, i32 102, i32 107, i32 112, i32 117, i32 122, i32 127, i32 132, i32 137, i32 142, i32 147, i32 152, i32 157>
2495 %strided.vec3 = shufflevector <160 x i32> %wide.vec, <160 x i32> poison, <32 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78, i32 83, i32 88, i32 93, i32 98, i32 103, i32 108, i32 113, i32 118, i32 123, i32 128, i32 133, i32 138, i32 143, i32 148, i32 153, i32 158>
2496 %strided.vec4 = shufflevector <160 x i32> %wide.vec, <160 x i32> poison, <32 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79, i32 84, i32 89, i32 94, i32 99, i32 104, i32 109, i32 114, i32 119, i32 124, i32 129, i32 134, i32 139, i32 144, i32 149, i32 154, i32 159>
2497 store <32 x i32> %strided.vec0, ptr %out.vec0, align 64
2498 store <32 x i32> %strided.vec1, ptr %out.vec1, align 64
2499 store <32 x i32> %strided.vec2, ptr %out.vec2, align 64
2500 store <32 x i32> %strided.vec3, ptr %out.vec3, align 64
2501 store <32 x i32> %strided.vec4, ptr %out.vec4, align 64
2502 ret void
2503 }
2505 define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
2506 ; SSE-LABEL: load_i32_stride5_vf64:
2507 ; SSE: # %bb.0:
2508 ; SSE-NEXT: subq $1928, %rsp # imm = 0x788
2509 ; SSE-NEXT: movdqa 768(%rdi), %xmm2
2510 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2511 ; SSE-NEXT: movdqa 752(%rdi), %xmm4
2512 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2513 ; SSE-NEXT: movdqa 720(%rdi), %xmm11
2514 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2515 ; SSE-NEXT: movdqa 736(%rdi), %xmm3
2516 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2517 ; SSE-NEXT: movdqa 448(%rdi), %xmm6
2518 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2519 ; SSE-NEXT: movdqa 432(%rdi), %xmm8
2520 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2521 ; SSE-NEXT: movdqa 400(%rdi), %xmm10
2522 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2523 ; SSE-NEXT: movdqa 416(%rdi), %xmm9
2524 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2525 ; SSE-NEXT: movdqa 128(%rdi), %xmm7
2526 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2527 ; SSE-NEXT: movdqa 112(%rdi), %xmm5
2528 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2529 ; SSE-NEXT: movdqa 80(%rdi), %xmm1
2530 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2531 ; SSE-NEXT: movdqa 96(%rdi), %xmm0
2532 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2533 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2534 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2535 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,2,2]
2536 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
2537 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2538 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2539 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
2540 ; SSE-NEXT: movdqa %xmm10, %xmm1
2541 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2542 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,2,2,2]
2543 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
2544 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2545 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2546 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
2547 ; SSE-NEXT: movdqa %xmm11, %xmm1
2548 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2549 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,2,2]
2550 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2551 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2552 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2553 ; SSE-NEXT: movdqa 1040(%rdi), %xmm2
2554 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2555 ; SSE-NEXT: movdqa 1056(%rdi), %xmm0
2556 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2557 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2558 ; SSE-NEXT: movdqa %xmm2, %xmm1
2559 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2560 ; SSE-NEXT: movdqa 1088(%rdi), %xmm2
2561 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2562 ; SSE-NEXT: movdqa 1072(%rdi), %xmm0
2563 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2564 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
2565 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2566 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2567 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2568 ; SSE-NEXT: movdqa (%rdi), %xmm1
2569 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2570 ; SSE-NEXT: movdqa 16(%rdi), %xmm14
2571 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[1,1,1,1]
2572 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2573 ; SSE-NEXT: movdqa 32(%rdi), %xmm12
2574 ; SSE-NEXT: movdqa 48(%rdi), %xmm2
2575 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2576 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,2,2]
2577 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2578 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2579 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2580 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2581 ; SSE-NEXT: movdqa 320(%rdi), %xmm1
2582 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2583 ; SSE-NEXT: movdqa 336(%rdi), %xmm0
2584 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2585 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2586 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2587 ; SSE-NEXT: movdqa 368(%rdi), %xmm2
2588 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2589 ; SSE-NEXT: movdqa 352(%rdi), %xmm0
2590 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2591 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
2592 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2593 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2594 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2595 ; SSE-NEXT: movdqa 640(%rdi), %xmm1
2596 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2597 ; SSE-NEXT: movdqa 656(%rdi), %xmm0
2598 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2599 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2600 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2601 ; SSE-NEXT: movdqa 688(%rdi), %xmm2
2602 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2603 ; SSE-NEXT: movdqa 672(%rdi), %xmm0
2604 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2605 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
2606 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2607 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2608 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2609 ; SSE-NEXT: movdqa 960(%rdi), %xmm1
2610 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2611 ; SSE-NEXT: movdqa 976(%rdi), %xmm0
2612 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2613 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2614 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2615 ; SSE-NEXT: movdqa 1008(%rdi), %xmm2
2616 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2617 ; SSE-NEXT: movdqa 992(%rdi), %xmm0
2618 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2619 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
2620 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2621 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2622 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2623 ; SSE-NEXT: movdqa 240(%rdi), %xmm2
2624 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2625 ; SSE-NEXT: movdqa 256(%rdi), %xmm4
2626 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
2627 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2628 ; SSE-NEXT: movdqa %xmm2, %xmm1
2629 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2630 ; SSE-NEXT: movdqa 288(%rdi), %xmm2
2631 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2632 ; SSE-NEXT: movdqa 272(%rdi), %xmm11
2633 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,2,2]
2634 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2635 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2636 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2637 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2638 ; SSE-NEXT: movdqa 560(%rdi), %xmm1
2639 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2640 ; SSE-NEXT: movdqa 576(%rdi), %xmm0
2641 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2642 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2643 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2644 ; SSE-NEXT: movdqa 608(%rdi), %xmm2
2645 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2646 ; SSE-NEXT: movdqa 592(%rdi), %xmm15
2647 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,2,2,2]
2648 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2649 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2650 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2651 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2652 ; SSE-NEXT: movdqa 880(%rdi), %xmm2
2653 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2654 ; SSE-NEXT: movdqa 896(%rdi), %xmm0
2655 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2656 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2657 ; SSE-NEXT: movdqa %xmm2, %xmm1
2658 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2659 ; SSE-NEXT: movdqa 928(%rdi), %xmm2
2660 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2661 ; SSE-NEXT: movdqa 912(%rdi), %xmm0
2662 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2663 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
2664 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2665 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2666 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2667 ; SSE-NEXT: movdqa 1200(%rdi), %xmm2
2668 ; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
2669 ; SSE-NEXT: movdqa 1216(%rdi), %xmm0
2670 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2671 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2672 ; SSE-NEXT: movdqa %xmm2, %xmm1
2673 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2674 ; SSE-NEXT: movdqa 1248(%rdi), %xmm3
2675 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2676 ; SSE-NEXT: movdqa 1232(%rdi), %xmm0
2677 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2678 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
2679 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
2680 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2681 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2682 ; SSE-NEXT: movdqa 160(%rdi), %xmm3
2683 ; SSE-NEXT: movdqa 176(%rdi), %xmm8
2684 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
2685 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2686 ; SSE-NEXT: movdqa %xmm3, %xmm1
2687 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2688 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2689 ; SSE-NEXT: movdqa 208(%rdi), %xmm2
2690 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2691 ; SSE-NEXT: movdqa 192(%rdi), %xmm7
2692 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,2,2]
2693 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2694 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2695 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2696 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2697 ; SSE-NEXT: movdqa 480(%rdi), %xmm9
2698 ; SSE-NEXT: movdqa 496(%rdi), %xmm1
2699 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
2700 ; SSE-NEXT: movdqa %xmm1, %xmm13
2701 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2702 ; SSE-NEXT: movdqa %xmm9, %xmm1
2703 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2704 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2705 ; SSE-NEXT: movdqa 528(%rdi), %xmm2
2706 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2707 ; SSE-NEXT: movdqa 512(%rdi), %xmm10
2708 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,2,2,2]
2709 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2710 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2711 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2712 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2713 ; SSE-NEXT: movdqa 800(%rdi), %xmm2
2714 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2715 ; SSE-NEXT: movdqa 816(%rdi), %xmm0
2716 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2717 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2718 ; SSE-NEXT: movdqa %xmm2, %xmm1
2719 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2720 ; SSE-NEXT: movdqa 848(%rdi), %xmm2
2721 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2722 ; SSE-NEXT: movdqa 832(%rdi), %xmm0
2723 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2724 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
2725 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2726 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2727 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2728 ; SSE-NEXT: movdqa 1120(%rdi), %xmm1
2729 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2730 ; SSE-NEXT: movdqa 1136(%rdi), %xmm0
2731 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2732 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2733 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2734 ; SSE-NEXT: movdqa 1168(%rdi), %xmm2
2735 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2736 ; SSE-NEXT: movdqa 1152(%rdi), %xmm0
2737 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2738 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
2739 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2740 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2741 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2742 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2743 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2744 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2745 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2746 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2747 ; SSE-NEXT: movdqa 144(%rdi), %xmm1
2748 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2749 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2750 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
2751 ; SSE-NEXT: # xmm6 = mem[2,3,2,3]
2752 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
2753 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm0[0],xmm6[1]
2754 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2755 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2756 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
2757 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,3,2,3]
2758 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2759 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2760 ; SSE-NEXT: movdqa 64(%rdi), %xmm1
2761 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2762 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2763 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm12[2,3,2,3]
2764 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
2765 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm0[0],xmm6[1]
2766 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2767 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2768 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
2769 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
2770 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2771 ; SSE-NEXT: movdqa 304(%rdi), %xmm1
2772 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2773 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2774 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[2,3,2,3]
2775 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2776 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
2777 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2778 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
2779 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,3,2,3]
2780 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2781 ; SSE-NEXT: movdqa 224(%rdi), %xmm8
2782 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,0,1,1]
2783 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
2784 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2785 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
2786 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2787 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2788 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2789 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2790 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2791 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2792 ; SSE-NEXT: movdqa 464(%rdi), %xmm1
2793 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2794 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2795 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2796 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
2797 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2798 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
2799 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2800 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
2801 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,1,1,1]
2802 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2803 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2804 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2805 ; SSE-NEXT: movdqa 384(%rdi), %xmm1
2806 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2807 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2808 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2809 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
2810 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2811 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
2812 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2813 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2814 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2815 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2816 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2817 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2818 ; SSE-NEXT: movdqa 624(%rdi), %xmm1
2819 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2820 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2821 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm15[2,3,2,3]
2822 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2823 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
2824 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2825 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
2826 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,3,2,3]
2827 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2828 ; SSE-NEXT: movdqa 544(%rdi), %xmm1
2829 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2830 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2831 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm10[2,3,2,3]
2832 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2833 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
2834 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2835 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2836 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2837 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2838 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2839 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2840 ; SSE-NEXT: movdqa 784(%rdi), %xmm1
2841 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2842 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2843 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2844 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
2845 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2846 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
2847 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2848 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2849 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2850 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2851 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2852 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2853 ; SSE-NEXT: movdqa 704(%rdi), %xmm1
2854 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2855 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2856 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2857 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
2858 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2859 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
2860 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2861 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2862 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2863 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2864 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2865 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2866 ; SSE-NEXT: movdqa 944(%rdi), %xmm1
2867 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2868 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2869 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2870 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
2871 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
2872 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
2873 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2874 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2875 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2876 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2877 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,3,2,3]
2878 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2879 ; SSE-NEXT: movdqa 864(%rdi), %xmm1
2880 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2881 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2882 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2883 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
2884 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2885 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
2886 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2887 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2888 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2889 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2890 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
2891 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2892 ; SSE-NEXT: movdqa 1104(%rdi), %xmm1
2893 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2894 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2895 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
2896 ; SSE-NEXT: # xmm2 = mem[2,3,2,3]
2897 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2898 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
2899 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2900 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2901 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
2902 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2903 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,3,2,3]
2904 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2905 ; SSE-NEXT: movdqa 1024(%rdi), %xmm1
2906 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2907 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2908 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
2909 ; SSE-NEXT: # xmm2 = mem[2,3,2,3]
2910 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2911 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
2912 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2913 ; SSE-NEXT: pshufd $85, (%rsp), %xmm0 # 16-byte Folded Reload
2914 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
2915 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2916 ; SSE-NEXT: # xmm1 = mem[2,3,2,3]
2917 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2918 ; SSE-NEXT: movdqa 1264(%rdi), %xmm1
2919 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2920 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2921 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
2922 ; SSE-NEXT: # xmm2 = mem[2,3,2,3]
2923 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2924 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
2925 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2926 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2927 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
2928 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2929 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,3,2,3]
2930 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2931 ; SSE-NEXT: movdqa 1184(%rdi), %xmm1
2932 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2933 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
2934 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2935 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
2936 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2937 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
2938 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2939 ; SSE-NEXT: movdqa %xmm5, %xmm0
2940 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,2,3,3]
2941 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2942 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2943 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[0,0,1,1]
2944 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2945 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
2946 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2947 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2948 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2949 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2950 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
2951 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2952 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2953 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,0,1,1]
2954 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2955 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
2956 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2957 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2958 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2959 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2960 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
2961 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2962 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2963 ; SSE-NEXT: # xmm1 = mem[0,0,1,1]
2964 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2965 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
2966 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2967 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2968 ; SSE-NEXT: movdqa %xmm6, %xmm0
2969 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2970 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
2971 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2972 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2973 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,0,1,1]
2974 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2975 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
2976 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2977 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2978 ; SSE-NEXT: movdqa %xmm11, %xmm0
2979 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2980 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
2981 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2982 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
2983 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,0,1,1]
2984 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2985 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
2986 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2987 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2988 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2989 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2990 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
2991 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2992 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2993 ; SSE-NEXT: # xmm1 = mem[0,0,1,1]
2994 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2995 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
2996 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
2997 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2998 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2999 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3000 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
3001 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3002 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3003 ; SSE-NEXT: # xmm1 = mem[0,0,1,1]
3004 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3005 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
3006 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3007 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3008 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3009 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3010 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
3011 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3012 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3013 ; SSE-NEXT: # xmm1 = mem[0,0,1,1]
3014 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3015 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
3016 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3017 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3018 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3019 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3020 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
3021 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3022 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3023 ; SSE-NEXT: # xmm1 = mem[0,0,1,1]
3024 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3025 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
3026 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3027 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3028 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3029 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3030 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
3031 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3032 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3033 ; SSE-NEXT: # xmm1 = mem[0,0,1,1]
3034 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3035 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
3036 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3037 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3038 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3039 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,2,3,3]
3040 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3041 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3042 ; SSE-NEXT: # xmm1 = mem[0,0,1,1]
3043 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3044 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
3045 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3046 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3047 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3048 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3049 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
3050 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3051 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3052 ; SSE-NEXT: # xmm1 = mem[0,0,1,1]
3053 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3054 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1]
3055 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3056 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3057 ; SSE-NEXT: movdqa %xmm10, %xmm0
3058 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,2,3,3]
3059 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3060 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
3061 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,0,1,1]
3062 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3063 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
3064 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3065 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3066 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3067 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3068 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
3069 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3070 ; SSE-NEXT: pshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3071 ; SSE-NEXT: # xmm1 = mem[0,0,1,1]
3072 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3073 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
3074 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3075 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3076 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,2,3,3]
3077 ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
3078 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
3079 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[0,0,1,1]
3080 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3081 ; SSE-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
3082 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm4[0],xmm1[1]
3083 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3084 ; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
3085 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3086 ; SSE-NEXT: # xmm1 = mem[2,2,3,3]
3087 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3088 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3089 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[0,0,1,1]
3090 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3091 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
3092 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
3093 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3094 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
3095 ; SSE-NEXT: movdqa %xmm14, %xmm2
3096 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
3097 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3098 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3099 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3100 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
3101 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
3102 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3103 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,2,3,3]
3104 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
3105 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3106 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3107 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
3108 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
3109 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm0[0],xmm7[1]
3110 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3111 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,2,3,3]
3112 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3113 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
3114 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3115 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3116 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3117 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
3118 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
3119 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3120 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,2,3,3]
3121 ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
3122 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3123 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3124 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3125 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
3126 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
3127 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3128 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3129 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3130 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
3131 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3132 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3133 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
3134 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
3135 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm0[0],xmm11[1]
3136 ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3137 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3138 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3139 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3140 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
3141 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3142 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3143 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
3144 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
3145 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
3146 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3147 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3148 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3149 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3150 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
3151 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3152 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3153 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3154 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
3155 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
3156 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3157 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3158 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3159 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3160 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
3161 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3162 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3163 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
3164 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
3165 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
3166 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3167 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3168 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3169 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3170 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
3171 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3172 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3173 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3174 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
3175 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
3176 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3177 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3178 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3179 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3180 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
3181 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3182 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3183 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3184 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
3185 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
3186 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3187 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3188 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3189 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3190 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
3191 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3192 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3193 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3194 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
3195 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
3196 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3197 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
3198 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
3199 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
3200 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3201 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3202 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3203 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
3204 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm0[0],xmm11[1]
3205 ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3206 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3207 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3208 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
3209 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3210 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3211 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3212 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
3213 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
3214 ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3215 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3216 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3217 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
3218 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
3219 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3220 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3221 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3222 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
3223 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
3224 ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3225 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3226 ; SSE-NEXT: # xmm0 = mem[2,2,3,3]
3227 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
3228 ; SSE-NEXT: pshufd $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3229 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3230 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3231 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
3232 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm0[0],xmm12[1]
3233 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3234 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
3235 ; SSE-NEXT: movdqa %xmm15, %xmm13
3236 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
3237 ; SSE-NEXT: pshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
3238 ; SSE-NEXT: # xmm0 = mem[3,3,3,3]
3239 ; SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3240 ; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
3241 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
3242 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3243 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
3244 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3245 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3246 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3247 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
3248 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3249 ; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
3250 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
3251 ; SSE-NEXT: movapd %xmm0, (%rsp) # 16-byte Spill
3252 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[1,1,1,1]
3253 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3254 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3255 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3256 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
3257 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3258 ; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
3259 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
3260 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3261 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3262 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
3263 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3264 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3265 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
3266 ; SSE-NEXT: # xmm14 = mem[2,2,2,2]
3267 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
3268 ; SSE-NEXT: # xmm14 = xmm14[2],mem[2],xmm14[3],mem[3]
3269 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm1[0],xmm14[1]
3270 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3271 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
3272 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3273 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3274 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
3275 ; SSE-NEXT: # xmm12 = mem[2,2,2,2]
3276 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
3277 ; SSE-NEXT: # xmm12 = xmm12[2],mem[2],xmm12[3],mem[3]
3278 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm1[0],xmm12[1]
3279 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
3280 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3281 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3282 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
3283 ; SSE-NEXT: # xmm11 = mem[2,2,2,2]
3284 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
3285 ; SSE-NEXT: # xmm11 = xmm11[2],mem[2],xmm11[3],mem[3]
3286 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm1[0],xmm11[1]
3287 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,1,1]
3288 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3289 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3290 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
3291 ; SSE-NEXT: # xmm10 = mem[2,2,2,2]
3292 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
3293 ; SSE-NEXT: # xmm10 = xmm10[2],mem[2],xmm10[3],mem[3]
3294 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm1[0],xmm10[1]
3295 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
3296 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3297 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3298 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
3299 ; SSE-NEXT: # xmm9 = mem[2,2,2,2]
3300 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
3301 ; SSE-NEXT: # xmm9 = xmm9[2],mem[2],xmm9[3],mem[3]
3302 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm1[0],xmm9[1]
3303 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
3304 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3305 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3306 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
3307 ; SSE-NEXT: # xmm8 = mem[2,2,2,2]
3308 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
3309 ; SSE-NEXT: # xmm8 = xmm8[2],mem[2],xmm8[3],mem[3]
3310 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
3311 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
3312 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3313 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3314 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
3315 ; SSE-NEXT: # xmm7 = mem[2,2,2,2]
3316 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
3317 ; SSE-NEXT: # xmm7 = xmm7[2],mem[2],xmm7[3],mem[3]
3318 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1]
3319 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
3320 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3321 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
3322 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
3323 ; SSE-NEXT: # xmm6 = mem[2,2,2,2]
3324 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
3325 ; SSE-NEXT: # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
3326 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm3[0],xmm6[1]
3327 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3328 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
3329 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3330 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3331 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
3332 ; SSE-NEXT: # xmm5 = mem[2,2,2,2]
3333 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
3334 ; SSE-NEXT: # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3]
3335 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1]
3336 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
3337 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3338 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3339 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
3340 ; SSE-NEXT: # xmm4 = mem[2,2,2,2]
3341 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
3342 ; SSE-NEXT: # xmm4 = xmm4[2],mem[2],xmm4[3],mem[3]
3343 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
3344 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3345 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
3346 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3347 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3348 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
3349 ; SSE-NEXT: # xmm3 = mem[2,2,2,2]
3350 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
3351 ; SSE-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
3352 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
3353 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
3354 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3355 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
3356 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
3357 ; SSE-NEXT: # xmm2 = mem[2,2,2,2]
3358 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
3359 ; SSE-NEXT: # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
3360 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
3361 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3362 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
3363 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3364 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
3365 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3366 ; SSE-NEXT: # xmm1 = mem[2,2,2,2]
3367 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3368 ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
3369 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm15[0],xmm1[1]
3370 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3371 ; SSE-NEXT: # xmm0 = mem[1,1,1,1]
3372 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3373 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
3374 ; SSE-NEXT: pshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3375 ; SSE-NEXT: # xmm0 = mem[2,2,2,2]
3376 ; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3377 ; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
3378 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm15[0],xmm0[1]
3379 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3380 ; SSE-NEXT: movaps %xmm15, 224(%rsi)
3381 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3382 ; SSE-NEXT: movaps %xmm15, 160(%rsi)
3383 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3384 ; SSE-NEXT: movaps %xmm15, 96(%rsi)
3385 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3386 ; SSE-NEXT: movaps %xmm15, 32(%rsi)
3387 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3388 ; SSE-NEXT: movaps %xmm15, 240(%rsi)
3389 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3390 ; SSE-NEXT: movaps %xmm15, 176(%rsi)
3391 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3392 ; SSE-NEXT: movaps %xmm15, 112(%rsi)
3393 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3394 ; SSE-NEXT: movaps %xmm15, 48(%rsi)
3395 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3396 ; SSE-NEXT: movaps %xmm15, 192(%rsi)
3397 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3398 ; SSE-NEXT: movaps %xmm15, 128(%rsi)
3399 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3400 ; SSE-NEXT: movaps %xmm15, 64(%rsi)
3401 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3402 ; SSE-NEXT: movaps %xmm15, (%rsi)
3403 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3404 ; SSE-NEXT: movaps %xmm15, 208(%rsi)
3405 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3406 ; SSE-NEXT: movaps %xmm15, 144(%rsi)
3407 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3408 ; SSE-NEXT: movaps %xmm15, 80(%rsi)
3409 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3410 ; SSE-NEXT: movaps %xmm15, 16(%rsi)
3411 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3412 ; SSE-NEXT: movaps %xmm15, 224(%rdx)
3413 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3414 ; SSE-NEXT: movaps %xmm15, 240(%rdx)
3415 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3416 ; SSE-NEXT: movaps %xmm15, 192(%rdx)
3417 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3418 ; SSE-NEXT: movaps %xmm15, 208(%rdx)
3419 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3420 ; SSE-NEXT: movaps %xmm15, 160(%rdx)
3421 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3422 ; SSE-NEXT: movaps %xmm15, 176(%rdx)
3423 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3424 ; SSE-NEXT: movaps %xmm15, 128(%rdx)
3425 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3426 ; SSE-NEXT: movaps %xmm15, 144(%rdx)
3427 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3428 ; SSE-NEXT: movaps %xmm15, 96(%rdx)
3429 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3430 ; SSE-NEXT: movaps %xmm15, 112(%rdx)
3431 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3432 ; SSE-NEXT: movaps %xmm15, 64(%rdx)
3433 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3434 ; SSE-NEXT: movaps %xmm15, 80(%rdx)
3435 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3436 ; SSE-NEXT: movaps %xmm15, 32(%rdx)
3437 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3438 ; SSE-NEXT: movaps %xmm15, 48(%rdx)
3439 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3440 ; SSE-NEXT: movaps %xmm15, (%rdx)
3441 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3442 ; SSE-NEXT: movaps %xmm15, 16(%rdx)
3443 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3444 ; SSE-NEXT: movaps %xmm15, 240(%rcx)
3445 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3446 ; SSE-NEXT: movaps %xmm15, 224(%rcx)
3447 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3448 ; SSE-NEXT: movaps %xmm15, 208(%rcx)
3449 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3450 ; SSE-NEXT: movaps %xmm15, 192(%rcx)
3451 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3452 ; SSE-NEXT: movaps %xmm15, 176(%rcx)
3453 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3454 ; SSE-NEXT: movaps %xmm15, 160(%rcx)
3455 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3456 ; SSE-NEXT: movaps %xmm15, 144(%rcx)
3457 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3458 ; SSE-NEXT: movaps %xmm15, 128(%rcx)
3459 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3460 ; SSE-NEXT: movaps %xmm15, 112(%rcx)
3461 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3462 ; SSE-NEXT: movaps %xmm15, 96(%rcx)
3463 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3464 ; SSE-NEXT: movaps %xmm15, 80(%rcx)
3465 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3466 ; SSE-NEXT: movaps %xmm15, 64(%rcx)
3467 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3468 ; SSE-NEXT: movaps %xmm15, 48(%rcx)
3469 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3470 ; SSE-NEXT: movaps %xmm15, 32(%rcx)
3471 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3472 ; SSE-NEXT: movaps %xmm15, 16(%rcx)
3473 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3474 ; SSE-NEXT: movaps %xmm15, (%rcx)
3475 ; SSE-NEXT: movapd %xmm13, 240(%r8)
3476 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3477 ; SSE-NEXT: movaps %xmm13, 224(%r8)
3478 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3479 ; SSE-NEXT: movaps %xmm13, 208(%r8)
3480 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3481 ; SSE-NEXT: movaps %xmm13, 192(%r8)
3482 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3483 ; SSE-NEXT: movaps %xmm13, 176(%r8)
3484 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3485 ; SSE-NEXT: movaps %xmm13, 160(%r8)
3486 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3487 ; SSE-NEXT: movaps %xmm13, 144(%r8)
3488 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3489 ; SSE-NEXT: movaps %xmm13, 128(%r8)
3490 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3491 ; SSE-NEXT: movaps %xmm13, 112(%r8)
3492 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3493 ; SSE-NEXT: movaps %xmm13, 96(%r8)
3494 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3495 ; SSE-NEXT: movaps %xmm13, 80(%r8)
3496 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3497 ; SSE-NEXT: movaps %xmm13, 64(%r8)
3498 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3499 ; SSE-NEXT: movaps %xmm13, 48(%r8)
3500 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3501 ; SSE-NEXT: movaps %xmm13, 32(%r8)
3502 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3503 ; SSE-NEXT: movaps %xmm13, 16(%r8)
3504 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3505 ; SSE-NEXT: movaps %xmm13, (%r8)
3506 ; SSE-NEXT: movapd %xmm0, 240(%r9)
3507 ; SSE-NEXT: movapd %xmm1, 224(%r9)
3508 ; SSE-NEXT: movapd %xmm2, 208(%r9)
3509 ; SSE-NEXT: movapd %xmm3, 192(%r9)
3510 ; SSE-NEXT: movapd %xmm4, 176(%r9)
3511 ; SSE-NEXT: movapd %xmm5, 160(%r9)
3512 ; SSE-NEXT: movapd %xmm6, 144(%r9)
3513 ; SSE-NEXT: movapd %xmm7, 128(%r9)
3514 ; SSE-NEXT: movapd %xmm8, 112(%r9)
3515 ; SSE-NEXT: movapd %xmm9, 96(%r9)
3516 ; SSE-NEXT: movapd %xmm10, 80(%r9)
3517 ; SSE-NEXT: movapd %xmm11, 64(%r9)
3518 ; SSE-NEXT: movapd %xmm12, 48(%r9)
3519 ; SSE-NEXT: movapd %xmm14, 32(%r9)
3520 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3521 ; SSE-NEXT: movaps %xmm0, 16(%r9)
3522 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
3523 ; SSE-NEXT: movaps %xmm0, (%r9)
3524 ; SSE-NEXT: addq $1928, %rsp # imm = 0x788
3525 ; SSE-NEXT: retq
3526 ;
3527 ; AVX1-ONLY-LABEL: load_i32_stride5_vf64:
3528 ; AVX1-ONLY: # %bb.0:
3529 ; AVX1-ONLY-NEXT: subq $2488, %rsp # imm = 0x9B8
3530 ; AVX1-ONLY-NEXT: vmovaps 832(%rdi), %ymm3
3531 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3532 ; AVX1-ONLY-NEXT: vmovaps 544(%rdi), %ymm8
3533 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3534 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %ymm9
3535 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3536 ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %ymm6
3537 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3538 ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm7
3539 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3540 ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm5
3541 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %ymm4
3542 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm1
3543 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3544 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm0
3545 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3546 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
3547 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
3548 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
3549 ; AVX1-ONLY-NEXT: vinsertf128 $1, 288(%rdi), %ymm0, %ymm1
3550 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm2
3551 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3552 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
3553 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3554 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
3555 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4],ymm2[5,6,7]
3556 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
3557 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6],ymm1[7]
3558 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3559 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7]
3560 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
3561 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
3562 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm1
3563 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3564 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3565 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3566 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm9[6,7]
3567 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4],ymm1[5,6,7]
3568 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3569 ; AVX1-ONLY-NEXT: vinsertf128 $1, 608(%rdi), %ymm0, %ymm0
3570 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
3571 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3572 ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %ymm0
3573 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3574 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7]
3575 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
3576 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
3577 ; AVX1-ONLY-NEXT: vmovaps 864(%rdi), %ymm2
3578 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3579 ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %ymm9
3580 ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %xmm1
3581 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3582 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3583 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3584 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm9[6,7]
3585 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4],ymm1[5,6,7]
3586 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3587 ; AVX1-ONLY-NEXT: vinsertf128 $1, 928(%rdi), %ymm0, %ymm0
3588 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
3589 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3590 ; AVX1-ONLY-NEXT: vmovaps 1152(%rdi), %ymm1
3591 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3592 ; AVX1-ONLY-NEXT: vmovaps 1120(%rdi), %ymm0
3593 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3594 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
3595 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
3596 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
3597 ; AVX1-ONLY-NEXT: vmovaps 1184(%rdi), %ymm13
3598 ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %ymm15
3599 ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %xmm1
3600 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3601 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3602 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3603 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm15[6,7]
3604 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4],ymm1[5,6,7]
3605 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3606 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1248(%rdi), %ymm0, %ymm0
3607 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
3608 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3609 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
3610 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3611 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %ymm0
3612 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3613 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
3614 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
3615 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
3616 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm8
3617 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm11
3618 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm1
3619 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3620 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3621 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3622 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm11[6,7]
3623 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4],ymm1[5,6,7]
3624 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3625 ; AVX1-ONLY-NEXT: vinsertf128 $1, 128(%rdi), %ymm0, %ymm0
3626 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
3627 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3628 ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %ymm1
3629 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3630 ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %ymm0
3631 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3632 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
3633 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
3634 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
3635 ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm14
3636 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm7
3637 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm1
3638 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3639 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3640 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3641 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
3642 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4],ymm1[5,6,7]
3643 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3644 ; AVX1-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm0
3645 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
3646 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3647 ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %ymm0
3648 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3649 ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %ymm1
3650 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3651 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
3652 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
3653 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
3654 ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %ymm12
3655 ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %ymm6
3656 ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %xmm1
3657 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3658 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3659 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3660 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
3661 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm12[4],ymm1[5,6,7]
3662 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3663 ; AVX1-ONLY-NEXT: vinsertf128 $1, 768(%rdi), %ymm0, %ymm0
3664 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
3665 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3666 ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %ymm1
3667 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3668 ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %ymm0
3669 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3670 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
3671 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1
3672 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
3673 ; AVX1-ONLY-NEXT: vmovaps 1024(%rdi), %ymm2
3674 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3675 ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %ymm10
3676 ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1
3677 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3678 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
3679 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3680 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm10[6,7]
3681 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4],ymm1[5,6,7]
3682 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
3683 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1088(%rdi), %ymm3, %ymm0
3684 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
3685 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3686 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3687 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm4[2,3,0,1]
3688 ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3689 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm5[1,3],ymm0[6,5],ymm5[5,7]
3690 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
3691 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
3692 ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm4
3693 ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3694 ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm1
3695 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3696 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
3697 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3698 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
3699 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3700 ; AVX1-ONLY-NEXT: vbroadcastss 304(%rdi), %ymm1
3701 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
3702 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3703 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3704 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
3705 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3706 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm4[1,3],ymm0[6,5],ymm4[5,7]
3707 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5],ymm1[6,7]
3708 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
3709 ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %xmm2
3710 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3711 ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %xmm1
3712 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3713 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3714 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3715 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
3716 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3717 ; AVX1-ONLY-NEXT: vbroadcastss 624(%rdi), %ymm1
3718 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
3719 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3720 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3721 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm9[2,3,0,1]
3722 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3723 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm1[1,3],ymm0[6,5],ymm1[5,7]
3724 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm9[2,3],ymm1[4,5],ymm9[6,7]
3725 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
3726 ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %xmm2
3727 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3728 ; AVX1-ONLY-NEXT: vmovaps 832(%rdi), %xmm1
3729 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3730 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3731 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3732 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
3733 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3734 ; AVX1-ONLY-NEXT: vbroadcastss 944(%rdi), %ymm1
3735 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
3736 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3737 ; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3738 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3,0,1]
3739 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm13[1,3],ymm0[6,5],ymm13[5,7]
3740 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm15[2,3],ymm13[4,5],ymm15[6,7]
3741 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
3742 ; AVX1-ONLY-NEXT: vmovaps 1120(%rdi), %xmm2
3743 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3744 ; AVX1-ONLY-NEXT: vmovaps 1152(%rdi), %xmm1
3745 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3746 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3747 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3748 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
3749 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3750 ; AVX1-ONLY-NEXT: vbroadcastss 1264(%rdi), %ymm1
3751 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
3752 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3753 ; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3754 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm11[2,3,0,1]
3755 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm8[1,3],ymm0[6,5],ymm8[5,7]
3756 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm11[2,3],ymm8[4,5],ymm11[6,7]
3757 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
3758 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm3
3759 ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3760 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1
3761 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3762 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
3763 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3764 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
3765 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3766 ; AVX1-ONLY-NEXT: vbroadcastss 144(%rdi), %ymm1
3767 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
3768 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3769 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3770 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3,0,1]
3771 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm14[1,3],ymm0[6,5],ymm14[5,7]
3772 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm14[0,1],ymm7[2,3],ymm14[4,5],ymm7[6,7]
3773 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
3774 ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %xmm2
3775 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3776 ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %xmm1
3777 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3778 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3779 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3780 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
3781 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3782 ; AVX1-ONLY-NEXT: vbroadcastss 464(%rdi), %ymm1
3783 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
3784 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3785 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3786 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm6[2,3,0,1]
3787 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm12[1,3],ymm0[6,5],ymm12[5,7]
3788 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm6[2,3],ymm12[4,5],ymm6[6,7]
3789 ; AVX1-ONLY-NEXT: vmovaps %ymm12, %ymm7
3790 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
3791 ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %xmm2
3792 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3793 ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %xmm1
3794 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3795 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3796 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3797 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
3798 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3799 ; AVX1-ONLY-NEXT: vbroadcastss 784(%rdi), %ymm1
3800 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
3801 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3802 ; AVX1-ONLY-NEXT: vmovaps %ymm10, %ymm1
3803 ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3804 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm10[2,3,0,1]
3805 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
3806 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm10[1,3],ymm0[6,5],ymm10[5,7]
3807 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1],ymm1[2,3],ymm10[4,5],ymm1[6,7]
3808 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[3,0],ymm0[6,4],ymm1[7,4]
3809 ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %xmm2
3810 ; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill
3811 ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %xmm1
3812 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3813 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3814 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3815 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,3,3]
3816 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3817 ; AVX1-ONLY-NEXT: vbroadcastss 1104(%rdi), %ymm1
3818 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
3819 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3820 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3821 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3822 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm1[2,0],ymm0[7,4],ymm1[6,4]
3823 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,1],ymm0[6,4],ymm1[6,5]
3824 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3825 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3826 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1,2,3],ymm1[4,5],mem[6,7]
3827 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3828 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3829 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
3830 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3831 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %ymm2
3832 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3833 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
3834 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3835 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,0],ymm1[0,0],ymm2[5,4],ymm1[4,4]
3836 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
3837 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
3838 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3839 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3840 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm4[2,0],ymm0[7,4],ymm4[6,4]
3841 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm4[2,1],ymm0[6,4],ymm4[6,5]
3842 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3843 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3844 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1,2,3],ymm1[4,5],mem[6,7]
3845 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3846 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3847 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
3848 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3849 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %ymm2
3850 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3851 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
3852 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3853 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,0],ymm1[0,0],ymm2[5,4],ymm1[4,4]
3854 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
3855 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
3856 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3857 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3858 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3859 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm1[2,0],ymm0[7,4],ymm1[6,4]
3860 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,1],ymm0[6,4],ymm1[6,5]
3861 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3862 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3863 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1,2,3],ymm1[4,5],mem[6,7]
3864 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3865 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3866 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
3867 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3868 ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %ymm2
3869 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3870 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
3871 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3872 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm2[1,0],ymm1[0,0],ymm2[5,4],ymm1[4,4]
3873 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
3874 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
3875 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3876 ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3877 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3878 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm13[2,0],ymm0[7,4],ymm13[6,4]
3879 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm13[2,1],ymm0[6,4],ymm13[6,5]
3880 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3881 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3882 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1,2,3],ymm1[4,5],mem[6,7]
3883 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3884 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3885 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
3886 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3887 ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %ymm1
3888 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3889 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm1[2,3,0,1]
3890 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,0],ymm6[0,0],ymm1[5,4],ymm6[4,4]
3891 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3892 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
3893 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
3894 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3895 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3896 ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3897 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm8[2,0],ymm0[7,4],ymm8[6,4]
3898 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm8[2,1],ymm0[6,4],ymm8[6,5]
3899 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3900 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3901 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1,2,3],ymm1[4,5],mem[6,7]
3902 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm1, %xmm1
3903 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2],xmm1[3]
3904 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
3905 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6,7]
3906 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm4
3907 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm4[2,3,0,1]
3908 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3909 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm4[1,0],ymm0[0,0],ymm4[5,4],ymm0[4,4]
3910 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
3911 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm2[6,7]
3912 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3913 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3914 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm0[3,0],ymm14[2,0],ymm0[7,4],ymm14[6,4]
3915 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm14[2,1],ymm1[6,4],ymm14[6,5]
3916 ; AVX1-ONLY-NEXT: vmovaps %ymm14, %ymm8
3917 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3918 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
3919 ; AVX1-ONLY-NEXT: # ymm2 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
3920 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm2
3921 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2],xmm2[3]
3922 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm2 = xmm2[1,0]
3923 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm1[3,4,5,6,7]
3924 ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm12
3925 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm12[2,3,0,1]
3926 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3927 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm12[1,0],ymm0[0,0],ymm12[5,4],ymm0[4,4]
3928 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
3929 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm3[6,7]
3930 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3931 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3932 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,0],ymm7[2,0],ymm0[7,4],ymm7[6,4]
3933 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm7[2,1],ymm2[6,4],ymm7[6,5]
3934 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3935 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
3936 ; AVX1-ONLY-NEXT: # ymm3 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
3937 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm3, %xmm3
3938 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],mem[2],xmm3[3]
3939 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm3 = xmm3[1,0]
3940 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm2[3,4,5,6,7]
3941 ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %ymm13
3942 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3,0,1]
3943 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3944 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm13[1,0],ymm0[0,0],ymm13[5,4],ymm0[4,4]
3945 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
3946 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm11[6,7]
3947 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3948 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3949 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm0[3,0],ymm10[2,0],ymm0[7,4],ymm10[6,4]
3950 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0],ymm10[2,1],ymm3[6,4],ymm10[6,5]
3951 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3952 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
3953 ; AVX1-ONLY-NEXT: # ymm11 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
3954 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm11, %xmm11
3955 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],mem[2],xmm11[3]
3956 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} xmm11 = xmm11[1,0]
3957 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2],ymm3[3,4,5,6,7]
3958 ; AVX1-ONLY-NEXT: vmovaps 1088(%rdi), %ymm11
3959 ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm11[2,3,0,1]
3960 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm11[1,0],ymm1[0,0],ymm11[5,4],ymm1[4,4]
3961 ; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm3
3962 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3963 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
3964 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
3965 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3966 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3967 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3968 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
3969 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3970 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3971 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm5[0,0],ymm1[3,0],ymm5[4,4],ymm1[7,4]
3972 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm1[2,2],ymm15[6,4],ymm1[6,6]
3973 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
3974 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
3975 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3976 ; AVX1-ONLY-NEXT: vshufps $18, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
3977 ; AVX1-ONLY-NEXT: # ymm15 = ymm1[2,0],mem[1,0],ymm1[6,4],mem[5,4]
3978 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
3979 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
3980 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3981 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3982 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3983 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
3984 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3985 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3986 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm2[0,0],ymm1[3,0],ymm2[4,4],ymm1[7,4]
3987 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm1[2,2],ymm15[6,4],ymm1[6,6]
3988 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
3989 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
3990 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3991 ; AVX1-ONLY-NEXT: vshufps $18, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
3992 ; AVX1-ONLY-NEXT: # ymm15 = ymm1[2,0],mem[1,0],ymm1[6,4],mem[5,4]
3993 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
3994 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
3995 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3996 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3997 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3998 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
3999 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4000 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4001 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm1[0,0],ymm2[3,0],ymm1[4,4],ymm2[7,4]
4002 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm2[2,2],ymm15[6,4],ymm2[6,6]
4003 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
4004 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
4005 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4006 ; AVX1-ONLY-NEXT: vshufps $18, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
4007 ; AVX1-ONLY-NEXT: # ymm15 = ymm1[2,0],mem[1,0],ymm1[6,4],mem[5,4]
4008 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
4009 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
4010 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4011 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4012 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4013 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
4014 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4015 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4016 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,0],ymm1[3,0],ymm15[4,4],ymm1[7,4]
4017 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm1[2,2],ymm15[6,4],ymm1[6,6]
4018 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
4019 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
4020 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4021 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm1[2,0],ymm6[1,0],ymm1[6,4],ymm6[5,4]
4022 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
4023 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
4024 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4025 ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
4026 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4027 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
4028 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4029 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm1[0,0],ymm10[3,0],ymm1[4,4],ymm10[7,4]
4030 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm10[2,2],ymm15[6,4],ymm10[6,6]
4031 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
4032 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
4033 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm11[2,0],ymm3[1,0],ymm11[6,4],ymm3[5,4]
4034 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
4035 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
4036 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4037 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4038 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4039 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
4040 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
4041 ; AVX1-ONLY-NEXT: vmovaps %ymm7, %ymm14
4042 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm9[0,0],ymm7[3,0],ymm9[4,4],ymm7[7,4]
4043 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm7[2,2],ymm15[6,4],ymm7[6,6]
4044 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
4045 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
4046 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4047 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm13[2,0],ymm10[1,0],ymm13[6,4],ymm10[5,4]
4048 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
4049 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
4050 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4051 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4052 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4053 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
4054 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4055 ; AVX1-ONLY-NEXT: vmovaps %ymm8, %ymm7
4056 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm6[0,0],ymm8[3,0],ymm6[4,4],ymm8[7,4]
4057 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm8[2,2],ymm15[6,4],ymm8[6,6]
4058 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
4059 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
4060 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4061 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm12[2,0],ymm3[1,0],ymm12[6,4],ymm3[5,4]
4062 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
4063 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
4064 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4065 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
4066 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4067 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
4068 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4069 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4070 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm1[0,0],ymm8[3,0],ymm1[4,4],ymm8[7,4]
4071 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm8[2,2],ymm15[6,4],ymm8[6,6]
4072 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,0],mem[1,3]
4073 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
4074 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4075 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm4[2,0],ymm2[1,0],ymm4[6,4],ymm2[5,4]
4076 ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
4077 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3,4,5],ymm15[6,7]
4078 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 16-byte Folded Reload
4079 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
4080 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4081 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
4082 ; AVX1-ONLY-NEXT: # ymm8 = ymm8[0,1,2,3],mem[4,5],ymm8[6,7]
4083 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm8, %xmm8
4084 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],mem[1],xmm8[2,3]
4085 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3,4,5,6,7]
4086 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3,4,5,6],ymm4[7]
4087 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7]
4088 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4089 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 16-byte Folded Reload
4090 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5],ymm4[6,7]
4091 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
4092 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
4093 ; AVX1-ONLY-NEXT: # ymm5 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
4094 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm5, %xmm5
4095 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0],mem[1],xmm5[2,3]
4096 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3,4,5,6,7]
4097 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4098 ; AVX1-ONLY-NEXT: vblendps $127, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
4099 ; AVX1-ONLY-NEXT: # ymm5 = mem[0,1,2,3,4,5,6],ymm1[7]
4100 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
4101 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm5 # 16-byte Folded Reload
4102 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5],ymm5[6,7]
4103 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4104 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
4105 ; AVX1-ONLY-NEXT: # ymm8 = ymm8[0,1,2,3],mem[4,5],ymm8[6,7]
4106 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm8, %xmm8
4107 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],mem[1],xmm8[2,3]
4108 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7]
4109 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm12[7]
4110 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm1[6,7]
4111 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4112 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 16-byte Folded Reload
4113 ; AVX1-ONLY-NEXT: vblendps $32, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
4114 ; AVX1-ONLY-NEXT: # ymm5 = ymm5[0,1,2,3,4],mem[5],ymm5[6,7]
4115 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4116 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
4117 ; AVX1-ONLY-NEXT: # ymm6 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
4118 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm6, %xmm6
4119 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],mem[1],xmm6[2,3]
4120 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7]
4121 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4122 ; AVX1-ONLY-NEXT: vblendps $127, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
4123 ; AVX1-ONLY-NEXT: # ymm6 = mem[0,1,2,3,4,5,6],ymm2[7]
4124 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
4125 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm6 # 16-byte Folded Reload
4126 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm9[5],ymm6[6,7]
4127 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4128 ; AVX1-ONLY-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
4129 ; AVX1-ONLY-NEXT: # ymm8 = mem[0,1,2,3],ymm8[4,5],mem[6,7]
4130 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm8, %xmm8
4131 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],mem[1],xmm8[2,3]
4132 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3,4,5,6,7]
4133 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1,2,3,4,5,6],ymm13[7]
4134 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
4135 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4136 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm6 # 16-byte Folded Reload
4137 ; AVX1-ONLY-NEXT: vblendps $32, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
4138 ; AVX1-ONLY-NEXT: # ymm6 = ymm6[0,1,2,3,4],mem[5],ymm6[6,7]
4139 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
4140 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
4141 ; AVX1-ONLY-NEXT: # ymm7 = ymm7[0,1,2,3],mem[4,5],ymm7[6,7]
4142 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm7, %xmm7
4143 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0],mem[1],xmm7[2,3]
4144 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7]
4145 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4146 ; AVX1-ONLY-NEXT: vblendps $127, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm7 # 32-byte Folded Reload
4147 ; AVX1-ONLY-NEXT: # ymm7 = mem[0,1,2,3,4,5,6],ymm3[7]
4148 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
4149 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4150 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm7 # 16-byte Folded Reload
4151 ; AVX1-ONLY-NEXT: vblendps $32, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
4152 ; AVX1-ONLY-NEXT: # ymm7 = ymm7[0,1,2,3,4],mem[5],ymm7[6,7]
4153 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4154 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
4155 ; AVX1-ONLY-NEXT: # ymm8 = ymm8[0,1,2,3],mem[4,5],ymm8[6,7]
4156 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm8, %xmm8
4157 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],mem[1],xmm8[2,3]
4158 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7]
4159 ; AVX1-ONLY-NEXT: vblendps $127, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm3 # 32-byte Folded Reload
4160 ; AVX1-ONLY-NEXT: # ymm3 = mem[0,1,2,3,4,5,6],ymm11[7]
4161 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6,7]
4162 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
4163 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 16-byte Folded Reload
4164 ; AVX1-ONLY-NEXT: vblendps $32, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
4165 ; AVX1-ONLY-NEXT: # ymm7 = ymm7[0,1,2,3,4],mem[5],ymm7[6,7]
4166 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4167 ; AVX1-ONLY-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
4168 ; AVX1-ONLY-NEXT: # ymm8 = ymm8[0,1,2,3],mem[4,5],ymm8[6,7]
4169 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm8, %xmm8
4170 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],mem[1],xmm8[2,3]
4171 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7]
4172 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4173 ; AVX1-ONLY-NEXT: vblendps $128, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
4174 ; AVX1-ONLY-NEXT: # ymm8 = ymm8[0,1,2,3,4,5,6],mem[7]
4175 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
4176 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4177 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 192(%rsi)
4178 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4179 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 128(%rsi)
4180 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4181 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 64(%rsi)
4182 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4183 ; AVX1-ONLY-NEXT: vmovaps %ymm8, (%rsi)
4184 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4185 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 224(%rsi)
4186 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4187 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 160(%rsi)
4188 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4189 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 96(%rsi)
4190 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4191 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%rsi)
4192 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4193 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 192(%rdx)
4194 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4195 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 128(%rdx)
4196 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4197 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 64(%rdx)
4198 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4199 ; AVX1-ONLY-NEXT: vmovaps %ymm8, (%rdx)
4200 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4201 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 224(%rdx)
4202 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4203 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 160(%rdx)
4204 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4205 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 96(%rdx)
4206 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4207 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%rdx)
4208 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4209 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 192(%rcx)
4210 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4211 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 128(%rcx)
4212 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4213 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 64(%rcx)
4214 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4215 ; AVX1-ONLY-NEXT: vmovaps %ymm8, (%rcx)
4216 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4217 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 224(%rcx)
4218 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4219 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 160(%rcx)
4220 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4221 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 96(%rcx)
4222 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4223 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%rcx)
4224 ; AVX1-ONLY-NEXT: vmovaps %ymm15, (%r8)
4225 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4226 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 64(%r8)
4227 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4228 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 128(%r8)
4229 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4230 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 192(%r8)
4231 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4232 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 224(%r8)
4233 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4234 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 160(%r8)
4235 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4236 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 96(%r8)
4237 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4238 ; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%r8)
4239 ; AVX1-ONLY-NEXT: vmovaps %ymm7, 224(%r9)
4240 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 192(%r9)
4241 ; AVX1-ONLY-NEXT: vmovaps %ymm6, 160(%r9)
4242 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 128(%r9)
4243 ; AVX1-ONLY-NEXT: vmovaps %ymm5, 96(%r9)
4244 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%r9)
4245 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%r9)
4246 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r9)
4247 ; AVX1-ONLY-NEXT: addq $2488, %rsp # imm = 0x9B8
4248 ; AVX1-ONLY-NEXT: vzeroupper
4249 ; AVX1-ONLY-NEXT: retq
4250 ;
4251 ; AVX2-ONLY-LABEL: load_i32_stride5_vf64:
4252 ; AVX2-ONLY: # %bb.0:
4253 ; AVX2-ONLY-NEXT: subq $2152, %rsp # imm = 0x868
4254 ; AVX2-ONLY-NEXT: vmovdqa 864(%rdi), %ymm4
4255 ; AVX2-ONLY-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4256 ; AVX2-ONLY-NEXT: vmovdqa 896(%rdi), %ymm5
4257 ; AVX2-ONLY-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4258 ; AVX2-ONLY-NEXT: vmovdqa 832(%rdi), %ymm6
4259 ; AVX2-ONLY-NEXT: vmovdqa 800(%rdi), %ymm7
4260 ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %ymm8
4261 ; AVX2-ONLY-NEXT: vmovdqa 576(%rdi), %ymm9
4262 ; AVX2-ONLY-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4263 ; AVX2-ONLY-NEXT: vmovdqa 512(%rdi), %ymm10
4264 ; AVX2-ONLY-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4265 ; AVX2-ONLY-NEXT: vmovdqa 480(%rdi), %ymm11
4266 ; AVX2-ONLY-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4267 ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm12
4268 ; AVX2-ONLY-NEXT: vmovdqu %ymm12, (%rsp) # 32-byte Spill
4269 ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm13
4270 ; AVX2-ONLY-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4271 ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %ymm2
4272 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4273 ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %ymm1
4274 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4275 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = [0,5,2,7]
4276 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
4277 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
4278 ; AVX2-ONLY-NEXT: vinserti128 $1, 288(%rdi), %ymm1, %ymm2
4279 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm13[0,1,0,3]
4280 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm12[4],ymm3[5,6,7]
4281 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
4282 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6],ymm2[7]
4283 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4284 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1],ymm10[2,3],ymm11[4,5],ymm10[6,7]
4285 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
4286 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm9[0,1,0,3]
4287 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4],ymm2[5,6,7]
4288 ; AVX2-ONLY-NEXT: vmovdqa %ymm8, %ymm13
4289 ; AVX2-ONLY-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4290 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
4291 ; AVX2-ONLY-NEXT: vinserti128 $1, 608(%rdi), %ymm1, %ymm1
4292 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
4293 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4294 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7]
4295 ; AVX2-ONLY-NEXT: vmovdqa %ymm7, %ymm12
4296 ; AVX2-ONLY-NEXT: vmovdqa %ymm6, %ymm14
4297 ; AVX2-ONLY-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4298 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
4299 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm5[0,1,0,3]
4300 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4],ymm2[5,6,7]
4301 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
4302 ; AVX2-ONLY-NEXT: vinserti128 $1, 928(%rdi), %ymm1, %ymm1
4303 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
4304 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4305 ; AVX2-ONLY-NEXT: vmovdqa 1152(%rdi), %ymm1
4306 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4307 ; AVX2-ONLY-NEXT: vmovdqa 1120(%rdi), %ymm2
4308 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4309 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
4310 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
4311 ; AVX2-ONLY-NEXT: vmovdqa 1184(%rdi), %ymm3
4312 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4313 ; AVX2-ONLY-NEXT: vmovdqa 1216(%rdi), %ymm2
4314 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4315 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,3]
4316 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4],ymm2[5,6,7]
4317 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
4318 ; AVX2-ONLY-NEXT: vinserti128 $1, 1248(%rdi), %ymm1, %ymm1
4319 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
4320 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4321 ; AVX2-ONLY-NEXT: vmovdqa 352(%rdi), %ymm1
4322 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4323 ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %ymm2
4324 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4325 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
4326 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
4327 ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %ymm3
4328 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4329 ; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %ymm2
4330 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4331 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,3]
4332 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4],ymm2[5,6,7]
4333 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
4334 ; AVX2-ONLY-NEXT: vinserti128 $1, 448(%rdi), %ymm1, %ymm1
4335 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
4336 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4337 ; AVX2-ONLY-NEXT: vmovdqa 672(%rdi), %ymm2
4338 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4339 ; AVX2-ONLY-NEXT: vmovdqa 640(%rdi), %ymm1
4340 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4341 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
4342 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
4343 ; AVX2-ONLY-NEXT: vmovdqa 704(%rdi), %ymm3
4344 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4345 ; AVX2-ONLY-NEXT: vmovdqa 736(%rdi), %ymm2
4346 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4347 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,3]
4348 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4],ymm2[5,6,7]
4349 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
4350 ; AVX2-ONLY-NEXT: vinserti128 $1, 768(%rdi), %ymm1, %ymm1
4351 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
4352 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4353 ; AVX2-ONLY-NEXT: vmovdqa 992(%rdi), %ymm2
4354 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4355 ; AVX2-ONLY-NEXT: vmovdqa 960(%rdi), %ymm1
4356 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4357 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
4358 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm1
4359 ; AVX2-ONLY-NEXT: vmovdqa 1024(%rdi), %ymm3
4360 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4361 ; AVX2-ONLY-NEXT: vmovdqa 1056(%rdi), %ymm2
4362 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4363 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,3]
4364 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4],ymm2[5,6,7]
4365 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
4366 ; AVX2-ONLY-NEXT: vinserti128 $1, 1088(%rdi), %ymm1, %ymm1
4367 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
4368 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4369 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm7
4370 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %ymm10
4371 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm10[2,3],ymm7[4,5],ymm10[6,7]
4372 ; AVX2-ONLY-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4373 ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4374 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm0
4375 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm4
4376 ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm15
4377 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm15[0,1,0,3]
4378 ; AVX2-ONLY-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4379 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4],ymm1[5,6,7]
4380 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4381 ; AVX2-ONLY-NEXT: vinserti128 $1, 128(%rdi), %ymm0, %ymm0
4382 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
4383 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4384 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = [1,6,3,u]
4385 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4386 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
4387 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm5[2,3],ymm6[4,5,6,7]
4388 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm2
4389 ; AVX2-ONLY-NEXT: vmovdqu (%rsp), %ymm11 # 32-byte Reload
4390 ; AVX2-ONLY-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm3 # 32-byte Folded Reload
4391 ; AVX2-ONLY-NEXT: # ymm3 = ymm11[0,1],mem[2,3],ymm11[4,5],mem[6,7]
4392 ; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [5,2,7,0,5,2,7,0]
4393 ; AVX2-ONLY-NEXT: # ymm1 = mem[0,1,0,1]
4394 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm1, %ymm3
4395 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
4396 ; AVX2-ONLY-NEXT: vpbroadcastd 304(%rdi), %ymm3
4397 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
4398 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4399 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
4400 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4401 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
4402 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4403 ; AVX2-ONLY-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm3 # 32-byte Folded Reload
4404 ; AVX2-ONLY-NEXT: # ymm3 = ymm13[0,1],mem[2,3],ymm13[4,5],mem[6,7]
4405 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm1, %ymm3
4406 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
4407 ; AVX2-ONLY-NEXT: vpbroadcastd 624(%rdi), %ymm3
4408 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
4409 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4410 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1],ymm14[2,3],ymm12[4,5,6,7]
4411 ; AVX2-ONLY-NEXT: vmovdqa %ymm12, %ymm14
4412 ; AVX2-ONLY-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4413 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4414 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4415 ; AVX2-ONLY-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm3 # 32-byte Folded Reload
4416 ; AVX2-ONLY-NEXT: # ymm3 = ymm12[0,1],mem[2,3],ymm12[4,5],mem[6,7]
4417 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm1, %ymm3
4418 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
4419 ; AVX2-ONLY-NEXT: vpbroadcastd 944(%rdi), %ymm3
4420 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
4421 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4422 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4423 ; AVX2-ONLY-NEXT: vpblendd $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
4424 ; AVX2-ONLY-NEXT: # ymm2 = ymm2[0,1],mem[2,3],ymm2[4,5,6,7]
4425 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4426 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
4427 ; AVX2-ONLY-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm3 # 32-byte Folded Reload
4428 ; AVX2-ONLY-NEXT: # ymm3 = ymm13[0,1],mem[2,3],ymm13[4,5],mem[6,7]
4429 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm1, %ymm3
4430 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
4431 ; AVX2-ONLY-NEXT: vpbroadcastd 1264(%rdi), %ymm3
4432 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
4433 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4434 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm10[2,3],ymm7[4,5,6,7]
4435 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4436 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm15[2,3],ymm4[4,5],ymm15[6,7]
4437 ; AVX2-ONLY-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4438 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm1, %ymm3
4439 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
4440 ; AVX2-ONLY-NEXT: vpbroadcastd 144(%rdi), %ymm3
4441 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
4442 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4443 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
4444 ; AVX2-ONLY-NEXT: vpblendd $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm2 # 32-byte Folded Reload
4445 ; AVX2-ONLY-NEXT: # ymm2 = ymm7[0,1],mem[2,3],ymm7[4,5,6,7]
4446 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4447 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4448 ; AVX2-ONLY-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
4449 ; AVX2-ONLY-NEXT: # ymm3 = ymm15[0,1],mem[2,3],ymm15[4,5],mem[6,7]
4450 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm1, %ymm3
4451 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
4452 ; AVX2-ONLY-NEXT: vpbroadcastd 464(%rdi), %ymm3
4453 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
4454 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4455 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4456 ; AVX2-ONLY-NEXT: vpblendd $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm2 # 32-byte Folded Reload
4457 ; AVX2-ONLY-NEXT: # ymm2 = mem[0,1],ymm10[2,3],mem[4,5,6,7]
4458 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4459 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4460 ; AVX2-ONLY-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
4461 ; AVX2-ONLY-NEXT: # ymm3 = ymm3[0,1],mem[2,3],ymm3[4,5],mem[6,7]
4462 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm1, %ymm3
4463 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
4464 ; AVX2-ONLY-NEXT: vpbroadcastd 784(%rdi), %ymm3
4465 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
4466 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4467 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4468 ; AVX2-ONLY-NEXT: vpblendd $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
4469 ; AVX2-ONLY-NEXT: # ymm2 = mem[0,1],ymm2[2,3],mem[4,5,6,7]
4470 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm0
4471 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4472 ; AVX2-ONLY-NEXT: vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
4473 ; AVX2-ONLY-NEXT: # ymm2 = ymm2[0,1],mem[2,3],ymm2[4,5],mem[6,7]
4474 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm1, %ymm1
4475 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4476 ; AVX2-ONLY-NEXT: vpbroadcastd 1104(%rdi), %ymm1
4477 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
4478 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4479 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm3 = [2,7,4,u]
4480 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
4481 ; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm3, %ymm0
4482 ; AVX2-ONLY-NEXT: vinserti128 $1, 256(%rdi), %ymm0, %ymm1
4483 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5,6],ymm1[7]
4484 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,0,1,6,7,4,5]
4485 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4486 ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm2
4487 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4488 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,5,0,5,0,5,0,5]
4489 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4490 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
4491 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4492 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0,1,2,3],ymm8[4,5],ymm9[6,7]
4493 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm3, %ymm1
4494 ; AVX2-ONLY-NEXT: vinserti128 $1, 576(%rdi), %ymm0, %ymm2
4495 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
4496 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5,6],ymm2[7]
4497 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
4498 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
4499 ; AVX2-ONLY-NEXT: vmovdqa 608(%rdi), %ymm2
4500 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4501 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4502 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
4503 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4504 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4505 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm11[4,5],ymm14[6,7]
4506 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm3, %ymm1
4507 ; AVX2-ONLY-NEXT: vinserti128 $1, 896(%rdi), %ymm0, %ymm2
4508 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1,2,3,4,5,6],ymm2[7]
4509 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
4510 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
4511 ; AVX2-ONLY-NEXT: vmovdqa 928(%rdi), %ymm2
4512 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4513 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4514 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
4515 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4516 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4517 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4518 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm6[4,5],ymm12[6,7]
4519 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm3, %ymm1
4520 ; AVX2-ONLY-NEXT: vinserti128 $1, 1216(%rdi), %ymm0, %ymm2
4521 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3,4,5,6],ymm2[7]
4522 ; AVX2-ONLY-NEXT: vmovdqa %ymm13, %ymm9
4523 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
4524 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
4525 ; AVX2-ONLY-NEXT: vmovdqa 1248(%rdi), %ymm2
4526 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4527 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm0, %ymm2
4528 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
4529 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4530 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4531 ; AVX2-ONLY-NEXT: vpblendd $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
4532 ; AVX2-ONLY-NEXT: # ymm1 = mem[0,1,2,3],ymm1[4,5],mem[6,7]
4533 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm3, %ymm1
4534 ; AVX2-ONLY-NEXT: vinserti128 $1, 96(%rdi), %ymm0, %ymm2
4535 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5,6],ymm2[7]
4536 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
4537 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7]
4538 ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm1
4539 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4540 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm4
4541 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm4[6,7]
4542 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4543 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm2 # 32-byte Folded Reload
4544 ; AVX2-ONLY-NEXT: # ymm2 = ymm7[0,1,2,3],mem[4,5],ymm7[6,7]
4545 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm3, %ymm2
4546 ; AVX2-ONLY-NEXT: vinserti128 $1, 416(%rdi), %ymm0, %ymm4
4547 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm15[0,1,2,3,4,5,6],ymm4[7]
4548 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
4549 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1,2],ymm4[3,4,5,6,7]
4550 ; AVX2-ONLY-NEXT: vmovdqa 448(%rdi), %ymm1
4551 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4552 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm15
4553 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm15[6,7]
4554 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4555 ; AVX2-ONLY-NEXT: vpblendd $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm4 # 32-byte Folded Reload
4556 ; AVX2-ONLY-NEXT: # ymm4 = mem[0,1,2,3],ymm10[4,5],mem[6,7]
4557 ; AVX2-ONLY-NEXT: vmovdqa %ymm10, %ymm14
4558 ; AVX2-ONLY-NEXT: vpermd %ymm4, %ymm3, %ymm4
4559 ; AVX2-ONLY-NEXT: vinserti128 $1, 736(%rdi), %ymm0, %ymm15
4560 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4561 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm15 = ymm2[0,1,2,3,4,5,6],ymm15[7]
4562 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[2,3,0,1,6,7,4,5]
4563 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm15 = ymm4[0,1,2],ymm15[3,4,5,6,7]
4564 ; AVX2-ONLY-NEXT: vmovdqa 768(%rdi), %ymm1
4565 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4566 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm0, %ymm13
4567 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3,4,5],ymm13[6,7]
4568 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4569 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
4570 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4571 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm7[0,1,2,3],ymm8[4,5],ymm7[6,7]
4572 ; AVX2-ONLY-NEXT: vpermd %ymm13, %ymm3, %ymm3
4573 ; AVX2-ONLY-NEXT: vinserti128 $1, 1056(%rdi), %ymm0, %ymm13
4574 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4575 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0,1,2,3,4,5,6],ymm13[7]
4576 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[2,3,0,1,6,7,4,5]
4577 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm3[0,1,2],ymm13[3,4,5,6,7]
4578 ; AVX2-ONLY-NEXT: vmovdqa 1088(%rdi), %ymm3
4579 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm0, %ymm0
4580 ; AVX2-ONLY-NEXT: vmovdqa %ymm3, %ymm4
4581 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4582 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3,4,5],ymm0[6,7]
4583 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4584 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4585 ; AVX2-ONLY-NEXT: vpblendd $207, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
4586 ; AVX2-ONLY-NEXT: # ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
4587 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,0,2,2,7,4,6,6]
4588 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4589 ; AVX2-ONLY-NEXT: vpalignr $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm13 # 32-byte Folded Reload
4590 ; AVX2-ONLY-NEXT: # ymm13 = mem[12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10,11],mem[28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26,27]
4591 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,3,2,3]
4592 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2],ymm0[3,4,5,6,7]
4593 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm0 = [1,6,1,6,1,6,1,6]
4594 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
4595 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3,4,5],ymm15[6,7]
4596 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4597 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
4598 ; AVX2-ONLY-NEXT: # ymm13 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
4599 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,0,2,2,7,4,6,6]
4600 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4601 ; AVX2-ONLY-NEXT: vpalignr $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm15 # 32-byte Folded Reload
4602 ; AVX2-ONLY-NEXT: # ymm15 = mem[12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10,11],mem[28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26,27]
4603 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,3,2,3]
4604 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
4605 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
4606 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3,4,5],ymm15[6,7]
4607 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4608 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4609 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm13 # 32-byte Folded Reload
4610 ; AVX2-ONLY-NEXT: # ymm13 = ymm3[0,1,2,3],mem[4,5],ymm3[6,7]
4611 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,0,2,2,7,4,6,6]
4612 ; AVX2-ONLY-NEXT: vpalignr $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm15 # 32-byte Folded Reload
4613 ; AVX2-ONLY-NEXT: # ymm15 = mem[12,13,14,15],ymm11[0,1,2,3,4,5,6,7,8,9,10,11],mem[28,29,30,31],ymm11[16,17,18,19,20,21,22,23,24,25,26,27]
4614 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,3,2,3]
4615 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
4616 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
4617 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3,4,5],ymm15[6,7]
4618 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4619 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm13 # 32-byte Folded Reload
4620 ; AVX2-ONLY-NEXT: # ymm13 = ymm9[0,1,2,3],mem[4,5],ymm9[6,7]
4621 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,0,2,2,7,4,6,6]
4622 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm15 = ymm12[12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10,11],ymm12[28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26,27]
4623 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,3,2,3]
4624 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
4625 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
4626 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3,4,5],ymm15[6,7]
4627 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4628 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm13 # 32-byte Folded Reload
4629 ; AVX2-ONLY-NEXT: # ymm13 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
4630 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,0,2,2,7,4,6,6]
4631 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm15 = ymm7[12,13,14,15],ymm8[0,1,2,3,4,5,6,7,8,9,10,11],ymm7[28,29,30,31],ymm8[16,17,18,19,20,21,22,23,24,25,26,27]
4632 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,3,2,3]
4633 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
4634 ; AVX2-ONLY-NEXT: vpermd %ymm4, %ymm0, %ymm15
4635 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1,2,3,4,5],ymm15[6,7]
4636 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4637 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm13 # 32-byte Folded Reload
4638 ; AVX2-ONLY-NEXT: # ymm13 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
4639 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,0,2,2,7,4,6,6]
4640 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4641 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm15 = ymm12[12,13,14,15],ymm10[0,1,2,3,4,5,6,7,8,9,10,11],ymm12[28,29,30,31],ymm10[16,17,18,19,20,21,22,23,24,25,26,27]
4642 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,3,2,3]
4643 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
4644 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
4645 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1,2,3,4,5],ymm15[6,7]
4646 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4647 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
4648 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4649 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm10[0,1,2,3],ymm4[4,5],ymm10[6,7]
4650 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,0,2,2,7,4,6,6]
4651 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4652 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
4653 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm15 = ymm9[12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10,11],ymm9[28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26,27]
4654 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,3,2,3]
4655 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
4656 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4657 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm0, %ymm15
4658 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1,2,3,4,5],ymm15[6,7]
4659 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4660 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4661 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
4662 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
4663 ; AVX2-ONLY-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[3,0,2,2,7,4,6,6]
4664 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4665 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
4666 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm15 = ymm5[12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11],ymm5[28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27]
4667 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,3,2,3]
4668 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7]
4669 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4670 ; AVX2-ONLY-NEXT: vpermd %ymm11, %ymm0, %ymm0
4671 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm0[6,7]
4672 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm7[0,1],ymm6[0,1]
4673 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5],ymm0[6,7]
4674 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm5[4,5],ymm1[6,7]
4675 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm8 = [4,1,6,u]
4676 ; AVX2-ONLY-NEXT: vpermd %ymm5, %ymm8, %ymm5
4677 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3,4,5,6,7]
4678 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm7 = [2,7,2,7,2,7,2,7]
4679 ; AVX2-ONLY-NEXT: vpermd %ymm11, %ymm7, %ymm1
4680 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
4681 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4682 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
4683 ; AVX2-ONLY-NEXT: # ymm1 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
4684 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4685 ; AVX2-ONLY-NEXT: vperm2i128 $2, (%rsp), %ymm11, %ymm5 # 32-byte Folded Reload
4686 ; AVX2-ONLY-NEXT: # ymm5 = mem[0,1],ymm11[0,1]
4687 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm11[5],ymm5[6,7]
4688 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm8, %ymm1
4689 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3,4,5,6,7]
4690 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm5 # 32-byte Folded Reload
4691 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0,1,2,3,4,5],ymm5[6,7]
4692 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm9[4,5],ymm2[6,7]
4693 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm10[0,1],ymm4[0,1]
4694 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
4695 ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm8, %ymm1
4696 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3,4,5,6,7]
4697 ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm7, %ymm2
4698 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
4699 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
4700 ; AVX2-ONLY-NEXT: vpblendd $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
4701 ; AVX2-ONLY-NEXT: # ymm2 = mem[0,1,2,3],ymm2[4,5],mem[6,7]
4702 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
4703 ; AVX2-ONLY-NEXT: vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm5 # 32-byte Folded Reload
4704 ; AVX2-ONLY-NEXT: # ymm5 = mem[0,1],ymm9[0,1]
4705 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm9[5],ymm5[6,7]
4706 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm8, %ymm2
4707 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3,4,5,6,7]
4708 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm5 # 32-byte Folded Reload
4709 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0,1,2,3,4,5],ymm5[6,7]
4710 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm12[4,5],ymm14[6,7]
4711 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4712 ; AVX2-ONLY-NEXT: vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm9 # 32-byte Folded Reload
4713 ; AVX2-ONLY-NEXT: # ymm9 = mem[0,1],ymm10[0,1]
4714 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5],ymm9[6,7]
4715 ; AVX2-ONLY-NEXT: vpermd %ymm2, %ymm8, %ymm2
4716 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm9[3,4,5,6,7]
4717 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm4 # 32-byte Folded Reload
4718 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
4719 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4720 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
4721 ; AVX2-ONLY-NEXT: # ymm4 = ymm3[0,1,2,3],mem[4,5],ymm3[6,7]
4722 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4723 ; AVX2-ONLY-NEXT: vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm9 # 32-byte Folded Reload
4724 ; AVX2-ONLY-NEXT: # ymm9 = mem[0,1],ymm10[0,1]
4725 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5],ymm9[6,7]
4726 ; AVX2-ONLY-NEXT: vpermd %ymm4, %ymm8, %ymm4
4727 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm9[3,4,5,6,7]
4728 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm9 # 32-byte Folded Reload
4729 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm9[6,7]
4730 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
4731 ; AVX2-ONLY-NEXT: vpblendd $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm9 # 32-byte Folded Reload
4732 ; AVX2-ONLY-NEXT: # ymm9 = mem[0,1,2,3],ymm3[4,5],mem[6,7]
4733 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
4734 ; AVX2-ONLY-NEXT: vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm10 # 32-byte Folded Reload
4735 ; AVX2-ONLY-NEXT: # ymm10 = mem[0,1],ymm12[0,1]
4736 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5],ymm10[6,7]
4737 ; AVX2-ONLY-NEXT: vpermd %ymm9, %ymm8, %ymm9
4738 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4,5,6,7]
4739 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm3 # 32-byte Folded Reload
4740 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm9[0,1,2,3,4,5],ymm3[6,7]
4741 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4742 ; AVX2-ONLY-NEXT: vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm9 # 32-byte Folded Reload
4743 ; AVX2-ONLY-NEXT: # ymm9 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
4744 ; AVX2-ONLY-NEXT: vpermd %ymm9, %ymm8, %ymm8
4745 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
4746 ; AVX2-ONLY-NEXT: vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm9 # 32-byte Folded Reload
4747 ; AVX2-ONLY-NEXT: # ymm9 = mem[0,1],ymm10[0,1]
4748 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5],ymm9[6,7]
4749 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4,5,6,7]
4750 ; AVX2-ONLY-NEXT: vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
4751 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
4752 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4753 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%rsi)
4754 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4755 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 128(%rsi)
4756 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4757 ; AVX2-ONLY-NEXT: vmovaps %ymm8, 64(%rsi)
4758 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4759 ; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rsi)
4760 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4761 ; AVX2-ONLY-NEXT: vmovaps %ymm8, 224(%rsi)
4762 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4763 ; AVX2-ONLY-NEXT: vmovaps %ymm8, 160(%rsi)
4764 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4765 ; AVX2-ONLY-NEXT: vmovaps %ymm8, 96(%rsi)
4766 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4767 ; AVX2-ONLY-NEXT: vmovaps %ymm8, 32(%rsi)
4768 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4769 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%rdx)
4770 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4771 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 128(%rdx)
4772 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4773 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rdx)
4774 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4775 ; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rdx)
4776 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4777 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 224(%rdx)
4778 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4779 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 160(%rdx)
4780 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4781 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%rdx)
4782 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4783 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%rdx)
4784 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4785 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%rcx)
4786 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4787 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 128(%rcx)
4788 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4789 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rcx)
4790 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4791 ; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rcx)
4792 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4793 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 224(%rcx)
4794 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4795 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 160(%rcx)
4796 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4797 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%rcx)
4798 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4799 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%rcx)
4800 ; AVX2-ONLY-NEXT: vmovdqa %ymm13, (%r8)
4801 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4802 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%r8)
4803 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4804 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 128(%r8)
4805 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4806 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%r8)
4807 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4808 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 224(%r8)
4809 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4810 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 160(%r8)
4811 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4812 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%r8)
4813 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4814 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%r8)
4815 ; AVX2-ONLY-NEXT: vmovdqa %ymm7, 224(%r9)
4816 ; AVX2-ONLY-NEXT: vmovdqa %ymm3, 192(%r9)
4817 ; AVX2-ONLY-NEXT: vmovdqa %ymm4, 160(%r9)
4818 ; AVX2-ONLY-NEXT: vmovdqa %ymm2, 128(%r9)
4819 ; AVX2-ONLY-NEXT: vmovdqa %ymm5, 96(%r9)
4820 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, 64(%r9)
4821 ; AVX2-ONLY-NEXT: vmovdqa %ymm11, 32(%r9)
4822 ; AVX2-ONLY-NEXT: vmovdqa %ymm0, (%r9)
4823 ; AVX2-ONLY-NEXT: addq $2152, %rsp # imm = 0x868
4824 ; AVX2-ONLY-NEXT: vzeroupper
4825 ; AVX2-ONLY-NEXT: retq
4827 ; AVX512F-LABEL: load_i32_stride5_vf64:
4828 ; AVX512F: # %bb.0:
4829 ; AVX512F-NEXT: subq $584, %rsp # imm = 0x248
4830 ; AVX512F-NEXT: vmovdqa64 1152(%rdi), %zmm20
4831 ; AVX512F-NEXT: vmovdqa64 1088(%rdi), %zmm1
4832 ; AVX512F-NEXT: vmovdqa64 832(%rdi), %zmm17
4833 ; AVX512F-NEXT: vmovdqa64 768(%rdi), %zmm2
4834 ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm11
4835 ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm26
4836 ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm0
4837 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm13
4838 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm21
4839 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm3
4840 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm30
4841 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [8,13,18,23,28,0,0,3,8,13,18,23,28,0,0,3]
4842 ; AVX512F-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
4843 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm4
4844 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm19, %zmm4
4845 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4846 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm15 = [0,5,10,15,20,25,30,u]
4847 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm4
4848 ; AVX512F-NEXT: vpermt2d %zmm21, %zmm15, %zmm4
4849 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm5
4850 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm19, %zmm5
4851 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4852 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm5
4853 ; AVX512F-NEXT: vpermt2d %zmm17, %zmm19, %zmm5
4854 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4855 ; AVX512F-NEXT: vpermi2d %zmm20, %zmm1, %zmm19
4856 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [9,14,19,24,29,0,0,4,9,14,19,24,29,0,0,4]
4857 ; AVX512F-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
4858 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm5
4859 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm16, %zmm5
4860 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4861 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm12 = [17,22,27,0,5,10,15,u]
4862 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm24
4863 ; AVX512F-NEXT: vpermt2d %zmm13, %zmm12, %zmm24
4864 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm5
4865 ; AVX512F-NEXT: vpermt2d %zmm17, %zmm16, %zmm5
4866 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4867 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm5
4868 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm16, %zmm5
4869 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4870 ; AVX512F-NEXT: vpermi2d %zmm20, %zmm1, %zmm16
4871 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm25 = [2,7,12,17,22,27,u,u]
4872 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm5
4873 ; AVX512F-NEXT: vpermt2d %zmm21, %zmm25, %zmm5
4874 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4875 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [10,15,20,25,30,0,0,5,10,15,20,25,30,0,0,5]
4876 ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
4877 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm23
4878 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm5
4879 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm14, %zmm23
4880 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm18
4881 ; AVX512F-NEXT: vpermt2d %zmm17, %zmm14, %zmm18
4882 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3
4883 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm22
4884 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
4885 ; AVX512F-NEXT: vpermi2d %zmm20, %zmm1, %zmm14
4886 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [27,0,5,10,15,0,17,22,27,0,5,10,15,0,17,22]
4887 ; AVX512F-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
4888 ; AVX512F-NEXT: vmovdqa64 %zmm30, %zmm27
4889 ; AVX512F-NEXT: vpermt2d %zmm5, %zmm28, %zmm27
4890 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [12,17,22,27,0,0,2,7,12,17,22,27,0,0,2,7]
4891 ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
4892 ; AVX512F-NEXT: vpermt2d %zmm30, %zmm0, %zmm5
4893 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4894 ; AVX512F-NEXT: vmovdqa64 %zmm26, %zmm30
4895 ; AVX512F-NEXT: vpermt2d %zmm3, %zmm28, %zmm30
4896 ; AVX512F-NEXT: vpermt2d %zmm26, %zmm0, %zmm3
4897 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4898 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm26
4899 ; AVX512F-NEXT: vpermt2d %zmm2, %zmm28, %zmm26
4900 ; AVX512F-NEXT: vpermi2d %zmm1, %zmm20, %zmm28
4901 ; AVX512F-NEXT: vpermt2d %zmm20, %zmm0, %zmm1
4902 ; AVX512F-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
4903 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [3,8,13,18,23,28,u,u]
4904 ; AVX512F-NEXT: vpermt2d %zmm17, %zmm0, %zmm2
4905 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4906 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm29
4907 ; AVX512F-NEXT: vpermt2d %zmm21, %zmm5, %zmm29
4908 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [4,9,14,19,24,29,u,u]
4909 ; AVX512F-NEXT: vpermt2d %zmm21, %zmm9, %zmm13
4910 ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm17
4911 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm31
4912 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm15, %zmm31
4913 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm20
4914 ; AVX512F-NEXT: vpermt2d %zmm17, %zmm12, %zmm20
4915 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm8
4916 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm25, %zmm8
4917 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm10
4918 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm5, %zmm10
4919 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm9, %zmm17
4920 ; AVX512F-NEXT: vmovdqa64 704(%rdi), %zmm1
4921 ; AVX512F-NEXT: vmovdqa64 640(%rdi), %zmm11
4922 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm2
4923 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm15, %zmm2
4924 ; AVX512F-NEXT: vmovdqa64 1024(%rdi), %zmm0
4925 ; AVX512F-NEXT: vmovdqa64 960(%rdi), %zmm21
4926 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm21, %zmm15
4927 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3
4928 ; AVX512F-NEXT: vpermt2d %zmm11, %zmm12, %zmm3
4929 ; AVX512F-NEXT: vpermi2d %zmm21, %zmm0, %zmm12
4930 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm6
4931 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm25, %zmm6
4932 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm21, %zmm25
4933 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm7
4934 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm5, %zmm7
4935 ; AVX512F-NEXT: vpermi2d %zmm0, %zmm21, %zmm5
4936 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm9, %zmm21
4937 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm9, %zmm11
4938 ; AVX512F-NEXT: movw $8064, %ax # imm = 0x1F80
4939 ; AVX512F-NEXT: kmovw %eax, %k1
4940 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm0
4941 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
4942 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
4943 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
4944 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm31 {%k1}
4945 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
4946 ; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm2 {%k1}
4947 ; AVX512F-NEXT: vmovdqa32 %zmm19, %zmm15 {%k1}
4948 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm1
4949 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,17,22,27]
4950 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm4, %zmm0
4951 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4952 ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm9
4953 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm4, %zmm31
4954 ; AVX512F-NEXT: vmovdqa64 896(%rdi), %zmm19
4955 ; AVX512F-NEXT: vpermt2d %zmm19, %zmm4, %zmm2
4956 ; AVX512F-NEXT: vmovdqa64 1216(%rdi), %zmm0
4957 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm4, %zmm15
4958 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
4959 ; AVX512F-NEXT: vmovdqa32 %zmm4, %zmm24 {%k1}
4960 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
4961 ; AVX512F-NEXT: vmovdqa32 %zmm4, %zmm3 {%k1}
4962 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
4963 ; AVX512F-NEXT: vmovdqa32 %zmm4, %zmm20 {%k1}
4964 ; AVX512F-NEXT: vmovdqa32 %zmm16, %zmm12 {%k1}
4965 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,18,23,28]
4966 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm4, %zmm24
4967 ; AVX512F-NEXT: vpermt2d %zmm19, %zmm4, %zmm3
4968 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm4, %zmm20
4969 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm4, %zmm12
4970 ; AVX512F-NEXT: movb $7, %al
4971 ; AVX512F-NEXT: kmovw %eax, %k1
4972 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
4973 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm23 {%k1}
4974 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm18 {%k1}
4975 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm22 {%k1}
4976 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm14 {%k1}
4977 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,19,24,29]
4978 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm4, %zmm23
4979 ; AVX512F-NEXT: vpermt2d %zmm19, %zmm4, %zmm18
4980 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm4, %zmm22
4981 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm4, %zmm14
4982 ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm27 {%k1}
4983 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm26 {%k1}
4984 ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm30 {%k1}
4985 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm28 {%k1}
4986 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,20,25,30]
4987 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm4, %zmm27
4988 ; AVX512F-NEXT: vpermt2d %zmm19, %zmm4, %zmm26
4989 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm4, %zmm30
4990 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm4, %zmm28
4991 ; AVX512F-NEXT: movb $56, %al
4992 ; AVX512F-NEXT: kmovw %eax, %k1
4993 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
4994 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm13 {%k1}
4995 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,16,21,26,31]
4996 ; AVX512F-NEXT: vpermt2d %zmm1, %zmm4, %zmm13
4997 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
4998 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm17 {%k1}
4999 ; AVX512F-NEXT: vpermt2d %zmm9, %zmm4, %zmm17
5000 ; AVX512F-NEXT: vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
5001 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm21 {%k1}
5002 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm4, %zmm21
5003 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5004 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm11 {%k1}
5005 ; AVX512F-NEXT: vpermt2d %zmm19, %zmm4, %zmm11
5006 ; AVX512F-NEXT: vmovdqa64 %zmm15, 192(%rsi)
5007 ; AVX512F-NEXT: vmovdqa64 %zmm2, 128(%rsi)
5008 ; AVX512F-NEXT: vmovdqa64 %zmm31, 64(%rsi)
5009 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5010 ; AVX512F-NEXT: vmovaps %zmm0, (%rsi)
5011 ; AVX512F-NEXT: vmovdqa64 %zmm12, 192(%rdx)
5012 ; AVX512F-NEXT: vmovdqa64 %zmm24, (%rdx)
5013 ; AVX512F-NEXT: vmovdqa64 %zmm20, 64(%rdx)
5014 ; AVX512F-NEXT: vmovdqa64 %zmm3, 128(%rdx)
5015 ; AVX512F-NEXT: vmovdqa64 %zmm14, 192(%rcx)
5016 ; AVX512F-NEXT: vmovdqa64 %zmm23, (%rcx)
5017 ; AVX512F-NEXT: vmovdqa64 %zmm22, 64(%rcx)
5018 ; AVX512F-NEXT: vmovdqa64 %zmm18, 128(%rcx)
5019 ; AVX512F-NEXT: vmovdqa64 %zmm28, 192(%r8)
5020 ; AVX512F-NEXT: vmovdqa64 %zmm27, (%r8)
5021 ; AVX512F-NEXT: vmovdqa64 %zmm30, 64(%r8)
5022 ; AVX512F-NEXT: vmovdqa64 %zmm26, 128(%r8)
5023 ; AVX512F-NEXT: vmovdqa64 %zmm11, 128(%r9)
5024 ; AVX512F-NEXT: vmovdqa64 %zmm21, 192(%r9)
5025 ; AVX512F-NEXT: vmovdqa64 %zmm13, (%r9)
5026 ; AVX512F-NEXT: vmovdqa64 %zmm17, 64(%r9)
5027 ; AVX512F-NEXT: addq $584, %rsp # imm = 0x248
5028 ; AVX512F-NEXT: vzeroupper
5029 ; AVX512F-NEXT: retq
5031 ; AVX512BW-LABEL: load_i32_stride5_vf64:
5032 ; AVX512BW: # %bb.0:
5033 ; AVX512BW-NEXT: subq $584, %rsp # imm = 0x248
5034 ; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %zmm20
5035 ; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %zmm1
5036 ; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm17
5037 ; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm2
5038 ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm11
5039 ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm26
5040 ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm0
5041 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm13
5042 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm21
5043 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm3
5044 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm30
5045 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [8,13,18,23,28,0,0,3,8,13,18,23,28,0,0,3]
5046 ; AVX512BW-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3]
5047 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm4
5048 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm19, %zmm4
5049 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5050 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm15 = [0,5,10,15,20,25,30,u]
5051 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm4
5052 ; AVX512BW-NEXT: vpermt2d %zmm21, %zmm15, %zmm4
5053 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm5
5054 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm19, %zmm5
5055 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5056 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm5
5057 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm19, %zmm5
5058 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5059 ; AVX512BW-NEXT: vpermi2d %zmm20, %zmm1, %zmm19
5060 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [9,14,19,24,29,0,0,4,9,14,19,24,29,0,0,4]
5061 ; AVX512BW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
5062 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm5
5063 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm16, %zmm5
5064 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5065 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm12 = [17,22,27,0,5,10,15,u]
5066 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm24
5067 ; AVX512BW-NEXT: vpermt2d %zmm13, %zmm12, %zmm24
5068 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm5
5069 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm16, %zmm5
5070 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5071 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm5
5072 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm16, %zmm5
5073 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5074 ; AVX512BW-NEXT: vpermi2d %zmm20, %zmm1, %zmm16
5075 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [2,7,12,17,22,27,u,u]
5076 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm5
5077 ; AVX512BW-NEXT: vpermt2d %zmm21, %zmm25, %zmm5
5078 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5079 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [10,15,20,25,30,0,0,5,10,15,20,25,30,0,0,5]
5080 ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
5081 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm23
5082 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm5
5083 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm14, %zmm23
5084 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm18
5085 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm14, %zmm18
5086 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3
5087 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm22
5088 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
5089 ; AVX512BW-NEXT: vpermi2d %zmm20, %zmm1, %zmm14
5090 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [27,0,5,10,15,0,17,22,27,0,5,10,15,0,17,22]
5091 ; AVX512BW-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3]
5092 ; AVX512BW-NEXT: vmovdqa64 %zmm30, %zmm27
5093 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm28, %zmm27
5094 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [12,17,22,27,0,0,2,7,12,17,22,27,0,0,2,7]
5095 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
5096 ; AVX512BW-NEXT: vpermt2d %zmm30, %zmm0, %zmm5
5097 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5098 ; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm30
5099 ; AVX512BW-NEXT: vpermt2d %zmm3, %zmm28, %zmm30
5100 ; AVX512BW-NEXT: vpermt2d %zmm26, %zmm0, %zmm3
5101 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5102 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm26
5103 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm28, %zmm26
5104 ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm20, %zmm28
5105 ; AVX512BW-NEXT: vpermt2d %zmm20, %zmm0, %zmm1
5106 ; AVX512BW-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
5107 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm5 = [3,8,13,18,23,28,u,u]
5108 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm0, %zmm2
5109 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5110 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm29
5111 ; AVX512BW-NEXT: vpermt2d %zmm21, %zmm5, %zmm29
5112 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm9 = [4,9,14,19,24,29,u,u]
5113 ; AVX512BW-NEXT: vpermt2d %zmm21, %zmm9, %zmm13
5114 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm17
5115 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm31
5116 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm15, %zmm31
5117 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm20
5118 ; AVX512BW-NEXT: vpermt2d %zmm17, %zmm12, %zmm20
5119 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm8
5120 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm25, %zmm8
5121 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm10
5122 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm5, %zmm10
5123 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm9, %zmm17
5124 ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm1
5125 ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm11
5126 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm2
5127 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm15, %zmm2
5128 ; AVX512BW-NEXT: vmovdqa64 1024(%rdi), %zmm0
5129 ; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm21
5130 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm21, %zmm15
5131 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3
5132 ; AVX512BW-NEXT: vpermt2d %zmm11, %zmm12, %zmm3
5133 ; AVX512BW-NEXT: vpermi2d %zmm21, %zmm0, %zmm12
5134 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm6
5135 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm25, %zmm6
5136 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm21, %zmm25
5137 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm7
5138 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm5, %zmm7
5139 ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm21, %zmm5
5140 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm9, %zmm21
5141 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm9, %zmm11
5142 ; AVX512BW-NEXT: movw $8064, %ax # imm = 0x1F80
5143 ; AVX512BW-NEXT: kmovd %eax, %k1
5144 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm0
5145 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5146 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
5147 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5148 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm31 {%k1}
5149 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5150 ; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm2 {%k1}
5151 ; AVX512BW-NEXT: vmovdqa32 %zmm19, %zmm15 {%k1}
5152 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm1
5153 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,17,22,27]
5154 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm4, %zmm0
5155 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5156 ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm9
5157 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm4, %zmm31
5158 ; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm19
5159 ; AVX512BW-NEXT: vpermt2d %zmm19, %zmm4, %zmm2
5160 ; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %zmm0
5161 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm4, %zmm15
5162 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
5163 ; AVX512BW-NEXT: vmovdqa32 %zmm4, %zmm24 {%k1}
5164 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
5165 ; AVX512BW-NEXT: vmovdqa32 %zmm4, %zmm3 {%k1}
5166 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
5167 ; AVX512BW-NEXT: vmovdqa32 %zmm4, %zmm20 {%k1}
5168 ; AVX512BW-NEXT: vmovdqa32 %zmm16, %zmm12 {%k1}
5169 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,18,23,28]
5170 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm4, %zmm24
5171 ; AVX512BW-NEXT: vpermt2d %zmm19, %zmm4, %zmm3
5172 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm4, %zmm20
5173 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm4, %zmm12
5174 ; AVX512BW-NEXT: movb $7, %al
5175 ; AVX512BW-NEXT: kmovd %eax, %k1
5176 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
5177 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm23 {%k1}
5178 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm18 {%k1}
5179 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm22 {%k1}
5180 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm14 {%k1}
5181 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,19,24,29]
5182 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm4, %zmm23
5183 ; AVX512BW-NEXT: vpermt2d %zmm19, %zmm4, %zmm18
5184 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm4, %zmm22
5185 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm4, %zmm14
5186 ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm27 {%k1}
5187 ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm26 {%k1}
5188 ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm30 {%k1}
5189 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm28 {%k1}
5190 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,12,20,25,30]
5191 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm4, %zmm27
5192 ; AVX512BW-NEXT: vpermt2d %zmm19, %zmm4, %zmm26
5193 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm4, %zmm30
5194 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm4, %zmm28
5195 ; AVX512BW-NEXT: movb $56, %al
5196 ; AVX512BW-NEXT: kmovd %eax, %k1
5197 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
5198 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm13 {%k1}
5199 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,7,8,9,10,11,16,21,26,31]
5200 ; AVX512BW-NEXT: vpermt2d %zmm1, %zmm4, %zmm13
5201 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5202 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm17 {%k1}
5203 ; AVX512BW-NEXT: vpermt2d %zmm9, %zmm4, %zmm17
5204 ; AVX512BW-NEXT: vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
5205 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm21 {%k1}
5206 ; AVX512BW-NEXT: vpermt2d %zmm0, %zmm4, %zmm21
5207 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5208 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm11 {%k1}
5209 ; AVX512BW-NEXT: vpermt2d %zmm19, %zmm4, %zmm11
5210 ; AVX512BW-NEXT: vmovdqa64 %zmm15, 192(%rsi)
5211 ; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%rsi)
5212 ; AVX512BW-NEXT: vmovdqa64 %zmm31, 64(%rsi)
5213 ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5214 ; AVX512BW-NEXT: vmovaps %zmm0, (%rsi)
5215 ; AVX512BW-NEXT: vmovdqa64 %zmm12, 192(%rdx)
5216 ; AVX512BW-NEXT: vmovdqa64 %zmm24, (%rdx)
5217 ; AVX512BW-NEXT: vmovdqa64 %zmm20, 64(%rdx)
5218 ; AVX512BW-NEXT: vmovdqa64 %zmm3, 128(%rdx)
5219 ; AVX512BW-NEXT: vmovdqa64 %zmm14, 192(%rcx)
5220 ; AVX512BW-NEXT: vmovdqa64 %zmm23, (%rcx)
5221 ; AVX512BW-NEXT: vmovdqa64 %zmm22, 64(%rcx)
5222 ; AVX512BW-NEXT: vmovdqa64 %zmm18, 128(%rcx)
5223 ; AVX512BW-NEXT: vmovdqa64 %zmm28, 192(%r8)
5224 ; AVX512BW-NEXT: vmovdqa64 %zmm27, (%r8)
5225 ; AVX512BW-NEXT: vmovdqa64 %zmm30, 64(%r8)
5226 ; AVX512BW-NEXT: vmovdqa64 %zmm26, 128(%r8)
5227 ; AVX512BW-NEXT: vmovdqa64 %zmm11, 128(%r9)
5228 ; AVX512BW-NEXT: vmovdqa64 %zmm21, 192(%r9)
5229 ; AVX512BW-NEXT: vmovdqa64 %zmm13, (%r9)
5230 ; AVX512BW-NEXT: vmovdqa64 %zmm17, 64(%r9)
5231 ; AVX512BW-NEXT: addq $584, %rsp # imm = 0x248
5232 ; AVX512BW-NEXT: vzeroupper
5233 ; AVX512BW-NEXT: retq
5234 %wide.vec = load <320 x i32>, ptr %in.vec, align 64
5235 %strided.vec0 = shufflevector <320 x i32> %wide.vec, <320 x i32> poison, <64 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75, i32 80, i32 85, i32 90, i32 95, i32 100, i32 105, i32 110, i32 115, i32 120, i32 125, i32 130, i32 135, i32 140, i32 145, i32 150, i32 155, i32 160, i32 165, i32 170, i32 175, i32 180, i32 185, i32 190, i32 195, i32 200, i32 205, i32 210, i32 215, i32 220, i32 225, i32 230, i32 235, i32 240, i32 245, i32 250, i32 255, i32 260, i32 265, i32 270, i32 275, i32 280, i32 285, i32 290, i32 295, i32 300, i32 305, i32 310, i32 315>
5236 %strided.vec1 = shufflevector <320 x i32> %wide.vec, <320 x i32> poison, <64 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76, i32 81, i32 86, i32 91, i32 96, i32 101, i32 106, i32 111, i32 116, i32 121, i32 126, i32 131, i32 136, i32 141, i32 146, i32 151, i32 156, i32 161, i32 166, i32 171, i32 176, i32 181, i32 186, i32 191, i32 196, i32 201, i32 206, i32 211, i32 216, i32 221, i32 226, i32 231, i32 236, i32 241, i32 246, i32 251, i32 256, i32 261, i32 266, i32 271, i32 276, i32 281, i32 286, i32 291, i32 296, i32 301, i32 306, i32 311, i32 316>
5237 %strided.vec2 = shufflevector <320 x i32> %wide.vec, <320 x i32> poison, <64 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77, i32 82, i32 87, i32 92, i32 97, i32 102, i32 107, i32 112, i32 117, i32 122, i32 127, i32 132, i32 137, i32 142, i32 147, i32 152, i32 157, i32 162, i32 167, i32 172, i32 177, i32 182, i32 187, i32 192, i32 197, i32 202, i32 207, i32 212, i32 217, i32 222, i32 227, i32 232, i32 237, i32 242, i32 247, i32 252, i32 257, i32 262, i32 267, i32 272, i32 277, i32 282, i32 287, i32 292, i32 297, i32 302, i32 307, i32 312, i32 317>
5238 %strided.vec3 = shufflevector <320 x i32> %wide.vec, <320 x i32> poison, <64 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78, i32 83, i32 88, i32 93, i32 98, i32 103, i32 108, i32 113, i32 118, i32 123, i32 128, i32 133, i32 138, i32 143, i32 148, i32 153, i32 158, i32 163, i32 168, i32 173, i32 178, i32 183, i32 188, i32 193, i32 198, i32 203, i32 208, i32 213, i32 218, i32 223, i32 228, i32 233, i32 238, i32 243, i32 248, i32 253, i32 258, i32 263, i32 268, i32 273, i32 278, i32 283, i32 288, i32 293, i32 298, i32 303, i32 308, i32 313, i32 318>
5239 %strided.vec4 = shufflevector <320 x i32> %wide.vec, <320 x i32> poison, <64 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79, i32 84, i32 89, i32 94, i32 99, i32 104, i32 109, i32 114, i32 119, i32 124, i32 129, i32 134, i32 139, i32 144, i32 149, i32 154, i32 159, i32 164, i32 169, i32 174, i32 179, i32 184, i32 189, i32 194, i32 199, i32 204, i32 209, i32 214, i32 219, i32 224, i32 229, i32 234, i32 239, i32 244, i32 249, i32 254, i32 259, i32 264, i32 269, i32 274, i32 279, i32 284, i32 289, i32 294, i32 299, i32 304, i32 309, i32 314, i32 319>
5240 store <64 x i32> %strided.vec0, ptr %out.vec0, align 64
5241 store <64 x i32> %strided.vec1, ptr %out.vec1, align 64
5242 store <64 x i32> %strided.vec2, ptr %out.vec2, align 64
5243 store <64 x i32> %strided.vec3, ptr %out.vec3, align 64
5244 store <64 x i32> %strided.vec4, ptr %out.vec4, align 64
5245 ret void
5246 }
5247 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
5252 ; AVX2-FAST-PERLANE: {{.*}}
5254 ; AVX512BW-FAST: {{.*}}
5255 ; AVX512BW-ONLY: {{.*}}
5256 ; AVX512BW-ONLY-FAST: {{.*}}
5257 ; AVX512BW-ONLY-SLOW: {{.*}}
5258 ; AVX512BW-SLOW: {{.*}}
5259 ; AVX512DQ-FAST: {{.*}}
5260 ; AVX512DQ-ONLY: {{.*}}
5261 ; AVX512DQ-SLOW: {{.*}}
5262 ; AVX512DQBW-FAST: {{.*}}
5263 ; AVX512DQBW-ONLY: {{.*}}
5264 ; AVX512DQBW-SLOW: {{.*}}
5265 ; AVX512F-FAST: {{.*}}
5266 ; AVX512F-ONLY: {{.*}}
5267 ; AVX512F-ONLY-FAST: {{.*}}
5268 ; AVX512F-ONLY-SLOW: {{.*}}
5269 ; AVX512F-SLOW: {{.*}}
5272 ; FALLBACK10: {{.*}}
5273 ; FALLBACK11: {{.*}}
5274 ; FALLBACK12: {{.*}}