1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FP
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FCP
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512-FCP
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX512DQ
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-FCP
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512BW
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW-FCP
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512DQ-BW
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-BW-FCP
16 ; These patterns are produced by LoopVectorizer for interleaved loads.
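; As a rough illustration (hypothetical C source, not part of this test), the
; stride-4 deinterleave checked below corresponds to a scalar loop of the form:
;
;   for (int i = 0; i < n; ++i) {
;     out0[i] = in[4*i + 0];
;     out1[i] = in[4*i + 1];
;     out2[i] = in[4*i + 2];
;     out3[i] = in[4*i + 3];
;   }
;
; which the vectorizer lowers to one wide load plus four shufflevectors per
; vector iteration, as in the IR bodies of the functions below.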
18 define void @load_i8_stride4_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
19 ; SSE-LABEL: load_i8_stride4_vf2:
20 ; SSE: # %bb.0:
21 ; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
22 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0]
23 ; SSE-NEXT: pand %xmm0, %xmm1
24 ; SSE-NEXT: packuswb %xmm1, %xmm1
25 ; SSE-NEXT: packuswb %xmm1, %xmm1
26 ; SSE-NEXT: pxor %xmm2, %xmm2
27 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[1,3,2,3,4,5,6,7]
28 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
29 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
30 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
31 ; SSE-NEXT: packuswb %xmm2, %xmm2
32 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
33 ; SSE-NEXT: packuswb %xmm3, %xmm3
34 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
35 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
36 ; SSE-NEXT: packuswb %xmm0, %xmm0
37 ; SSE-NEXT: movd %xmm1, %eax
38 ; SSE-NEXT: movw %ax, (%rsi)
39 ; SSE-NEXT: movd %xmm2, %eax
40 ; SSE-NEXT: movw %ax, (%rdx)
41 ; SSE-NEXT: movd %xmm3, %eax
42 ; SSE-NEXT: movw %ax, (%rcx)
43 ; SSE-NEXT: movd %xmm0, %eax
44 ; SSE-NEXT: movw %ax, (%r8)
45 ; SSE-NEXT: retq
46 ;
47 ; AVX-LABEL: load_i8_stride4_vf2:
48 ; AVX: # %bb.0:
49 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
50 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
51 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
52 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
53 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
54 ; AVX-NEXT: vpextrw $0, %xmm1, (%rsi)
55 ; AVX-NEXT: vpextrw $0, %xmm2, (%rdx)
56 ; AVX-NEXT: vpextrw $0, %xmm3, (%rcx)
57 ; AVX-NEXT: vpextrw $0, %xmm0, (%r8)
58 ; AVX-NEXT: retq
59 ;
60 ; AVX2-LABEL: load_i8_stride4_vf2:
61 ; AVX2: # %bb.0:
62 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
63 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
64 ; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
65 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
66 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
67 ; AVX2-NEXT: vpextrw $0, %xmm1, (%rsi)
68 ; AVX2-NEXT: vpextrw $0, %xmm2, (%rdx)
69 ; AVX2-NEXT: vpextrw $0, %xmm3, (%rcx)
70 ; AVX2-NEXT: vpextrw $0, %xmm0, (%r8)
71 ; AVX2-NEXT: retq
72 ;
73 ; AVX2-FP-LABEL: load_i8_stride4_vf2:
74 ; AVX2-FP: # %bb.0:
75 ; AVX2-FP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
76 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
77 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
78 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
79 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
80 ; AVX2-FP-NEXT: vpextrw $0, %xmm1, (%rsi)
81 ; AVX2-FP-NEXT: vpextrw $0, %xmm2, (%rdx)
82 ; AVX2-FP-NEXT: vpextrw $0, %xmm3, (%rcx)
83 ; AVX2-FP-NEXT: vpextrw $0, %xmm0, (%r8)
84 ; AVX2-FP-NEXT: retq
85 ;
86 ; AVX2-FCP-LABEL: load_i8_stride4_vf2:
87 ; AVX2-FCP: # %bb.0:
88 ; AVX2-FCP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
89 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
90 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
91 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
92 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
93 ; AVX2-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
94 ; AVX2-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
95 ; AVX2-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
96 ; AVX2-FCP-NEXT: vpextrw $0, %xmm0, (%r8)
97 ; AVX2-FCP-NEXT: retq
98 ;
99 ; AVX512-LABEL: load_i8_stride4_vf2:
100 ; AVX512: # %bb.0:
101 ; AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
102 ; AVX512-NEXT: vpmovdb %xmm0, %xmm1
103 ; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
104 ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
105 ; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
106 ; AVX512-NEXT: vpextrw $0, %xmm1, (%rsi)
107 ; AVX512-NEXT: vpextrw $0, %xmm2, (%rdx)
108 ; AVX512-NEXT: vpextrw $0, %xmm3, (%rcx)
109 ; AVX512-NEXT: vpextrw $0, %xmm0, (%r8)
110 ; AVX512-NEXT: retq
111 ;
112 ; AVX512-FCP-LABEL: load_i8_stride4_vf2:
113 ; AVX512-FCP: # %bb.0:
114 ; AVX512-FCP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
115 ; AVX512-FCP-NEXT: vpmovdb %xmm0, %xmm1
116 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
117 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
118 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
119 ; AVX512-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
120 ; AVX512-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
121 ; AVX512-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
122 ; AVX512-FCP-NEXT: vpextrw $0, %xmm0, (%r8)
123 ; AVX512-FCP-NEXT: retq
124 ;
125 ; AVX512DQ-LABEL: load_i8_stride4_vf2:
126 ; AVX512DQ: # %bb.0:
127 ; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
128 ; AVX512DQ-NEXT: vpmovdb %xmm0, %xmm1
129 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
130 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
131 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
132 ; AVX512DQ-NEXT: vpextrw $0, %xmm1, (%rsi)
133 ; AVX512DQ-NEXT: vpextrw $0, %xmm2, (%rdx)
134 ; AVX512DQ-NEXT: vpextrw $0, %xmm3, (%rcx)
135 ; AVX512DQ-NEXT: vpextrw $0, %xmm0, (%r8)
136 ; AVX512DQ-NEXT: retq
137 ;
138 ; AVX512DQ-FCP-LABEL: load_i8_stride4_vf2:
139 ; AVX512DQ-FCP: # %bb.0:
140 ; AVX512DQ-FCP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
141 ; AVX512DQ-FCP-NEXT: vpmovdb %xmm0, %xmm1
142 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
143 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
144 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
145 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
146 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
147 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
148 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm0, (%r8)
149 ; AVX512DQ-FCP-NEXT: retq
150 ;
151 ; AVX512BW-LABEL: load_i8_stride4_vf2:
152 ; AVX512BW: # %bb.0:
153 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
154 ; AVX512BW-NEXT: vpmovdb %xmm0, %xmm1
155 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
156 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
157 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
158 ; AVX512BW-NEXT: vpextrw $0, %xmm1, (%rsi)
159 ; AVX512BW-NEXT: vpextrw $0, %xmm2, (%rdx)
160 ; AVX512BW-NEXT: vpextrw $0, %xmm3, (%rcx)
161 ; AVX512BW-NEXT: vpextrw $0, %xmm0, (%r8)
162 ; AVX512BW-NEXT: retq
163 ;
164 ; AVX512BW-FCP-LABEL: load_i8_stride4_vf2:
165 ; AVX512BW-FCP: # %bb.0:
166 ; AVX512BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
167 ; AVX512BW-FCP-NEXT: vpmovdb %xmm0, %xmm1
168 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
169 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
170 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
171 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
172 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
173 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
174 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm0, (%r8)
175 ; AVX512BW-FCP-NEXT: retq
176 ;
177 ; AVX512DQ-BW-LABEL: load_i8_stride4_vf2:
178 ; AVX512DQ-BW: # %bb.0:
179 ; AVX512DQ-BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
180 ; AVX512DQ-BW-NEXT: vpmovdb %xmm0, %xmm1
181 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
182 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
183 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
184 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm1, (%rsi)
185 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm2, (%rdx)
186 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm3, (%rcx)
187 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm0, (%r8)
188 ; AVX512DQ-BW-NEXT: retq
189 ;
190 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride4_vf2:
191 ; AVX512DQ-BW-FCP: # %bb.0:
192 ; AVX512DQ-BW-FCP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
193 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %xmm0, %xmm1
194 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
195 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
196 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
197 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
198 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
199 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
200 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm0, (%r8)
201 ; AVX512DQ-BW-FCP-NEXT: retq
202 %wide.vec = load <8 x i8>, ptr %in.vec, align 64
203 %strided.vec0 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 0, i32 4>
204 %strided.vec1 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 1, i32 5>
205 %strided.vec2 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 2, i32 6>
206 %strided.vec3 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 3, i32 7>
207 store <2 x i8> %strided.vec0, ptr %out.vec0, align 64
208 store <2 x i8> %strided.vec1, ptr %out.vec1, align 64
209 store <2 x i8> %strided.vec2, ptr %out.vec2, align 64
210 store <2 x i8> %strided.vec3, ptr %out.vec3, align 64
211 ret void
212 }
214 define void @load_i8_stride4_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
215 ; SSE-LABEL: load_i8_stride4_vf4:
216 ; SSE: # %bb.0:
217 ; SSE-NEXT: movdqa (%rdi), %xmm1
218 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0]
219 ; SSE-NEXT: pand %xmm1, %xmm0
220 ; SSE-NEXT: packuswb %xmm0, %xmm0
221 ; SSE-NEXT: packuswb %xmm0, %xmm0
222 ; SSE-NEXT: pxor %xmm2, %xmm2
223 ; SSE-NEXT: movdqa %xmm1, %xmm3
224 ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
225 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,2,2,3]
226 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
227 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm1[3,1,2,3,4,5,6,7]
228 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
229 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
230 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
231 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
232 ; SSE-NEXT: packuswb %xmm2, %xmm2
233 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm5[0,1,2,3,7,5,6,7]
234 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
235 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
236 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7]
237 ; SSE-NEXT: packuswb %xmm4, %xmm4
238 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
239 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
240 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
241 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
242 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
243 ; SSE-NEXT: packuswb %xmm1, %xmm1
244 ; SSE-NEXT: movd %xmm0, (%rsi)
245 ; SSE-NEXT: movd %xmm2, (%rdx)
246 ; SSE-NEXT: movd %xmm4, (%rcx)
247 ; SSE-NEXT: movd %xmm1, (%r8)
248 ; SSE-NEXT: retq
249 ;
250 ; AVX-LABEL: load_i8_stride4_vf4:
251 ; AVX: # %bb.0:
252 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
253 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
254 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
255 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
256 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
257 ; AVX-NEXT: vmovd %xmm1, (%rsi)
258 ; AVX-NEXT: vmovd %xmm2, (%rdx)
259 ; AVX-NEXT: vmovd %xmm3, (%rcx)
260 ; AVX-NEXT: vmovd %xmm0, (%r8)
261 ; AVX-NEXT: retq
262 ;
263 ; AVX2-LABEL: load_i8_stride4_vf4:
264 ; AVX2: # %bb.0:
265 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
266 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
267 ; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
268 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
269 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
270 ; AVX2-NEXT: vmovd %xmm1, (%rsi)
271 ; AVX2-NEXT: vmovd %xmm2, (%rdx)
272 ; AVX2-NEXT: vmovd %xmm3, (%rcx)
273 ; AVX2-NEXT: vmovd %xmm0, (%r8)
274 ; AVX2-NEXT: retq
275 ;
276 ; AVX2-FP-LABEL: load_i8_stride4_vf4:
277 ; AVX2-FP: # %bb.0:
278 ; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm0
279 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
280 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
281 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
282 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
283 ; AVX2-FP-NEXT: vmovd %xmm1, (%rsi)
284 ; AVX2-FP-NEXT: vmovd %xmm2, (%rdx)
285 ; AVX2-FP-NEXT: vmovd %xmm3, (%rcx)
286 ; AVX2-FP-NEXT: vmovd %xmm0, (%r8)
287 ; AVX2-FP-NEXT: retq
288 ;
289 ; AVX2-FCP-LABEL: load_i8_stride4_vf4:
290 ; AVX2-FCP: # %bb.0:
291 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm0
292 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
293 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
294 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
295 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
296 ; AVX2-FCP-NEXT: vmovd %xmm1, (%rsi)
297 ; AVX2-FCP-NEXT: vmovd %xmm2, (%rdx)
298 ; AVX2-FCP-NEXT: vmovd %xmm3, (%rcx)
299 ; AVX2-FCP-NEXT: vmovd %xmm0, (%r8)
300 ; AVX2-FCP-NEXT: retq
301 ;
302 ; AVX512-LABEL: load_i8_stride4_vf4:
303 ; AVX512: # %bb.0:
304 ; AVX512-NEXT: vmovdqa (%rdi), %xmm0
305 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
306 ; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
307 ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
308 ; AVX512-NEXT: vpmovdb %xmm0, (%rsi)
309 ; AVX512-NEXT: vmovd %xmm1, (%rdx)
310 ; AVX512-NEXT: vmovd %xmm2, (%rcx)
311 ; AVX512-NEXT: vmovd %xmm3, (%r8)
312 ; AVX512-NEXT: retq
313 ;
314 ; AVX512-FCP-LABEL: load_i8_stride4_vf4:
315 ; AVX512-FCP: # %bb.0:
316 ; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
317 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
318 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
319 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
320 ; AVX512-FCP-NEXT: vpmovdb %xmm0, (%rsi)
321 ; AVX512-FCP-NEXT: vmovd %xmm1, (%rdx)
322 ; AVX512-FCP-NEXT: vmovd %xmm2, (%rcx)
323 ; AVX512-FCP-NEXT: vmovd %xmm3, (%r8)
324 ; AVX512-FCP-NEXT: retq
325 ;
326 ; AVX512DQ-LABEL: load_i8_stride4_vf4:
327 ; AVX512DQ: # %bb.0:
328 ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
329 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
330 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
331 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
332 ; AVX512DQ-NEXT: vpmovdb %xmm0, (%rsi)
333 ; AVX512DQ-NEXT: vmovd %xmm1, (%rdx)
334 ; AVX512DQ-NEXT: vmovd %xmm2, (%rcx)
335 ; AVX512DQ-NEXT: vmovd %xmm3, (%r8)
336 ; AVX512DQ-NEXT: retq
337 ;
338 ; AVX512DQ-FCP-LABEL: load_i8_stride4_vf4:
339 ; AVX512DQ-FCP: # %bb.0:
340 ; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
341 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
342 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
343 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
344 ; AVX512DQ-FCP-NEXT: vpmovdb %xmm0, (%rsi)
345 ; AVX512DQ-FCP-NEXT: vmovd %xmm1, (%rdx)
346 ; AVX512DQ-FCP-NEXT: vmovd %xmm2, (%rcx)
347 ; AVX512DQ-FCP-NEXT: vmovd %xmm3, (%r8)
348 ; AVX512DQ-FCP-NEXT: retq
349 ;
350 ; AVX512BW-LABEL: load_i8_stride4_vf4:
351 ; AVX512BW: # %bb.0:
352 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
353 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
354 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
355 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
356 ; AVX512BW-NEXT: vpmovdb %xmm0, (%rsi)
357 ; AVX512BW-NEXT: vmovd %xmm1, (%rdx)
358 ; AVX512BW-NEXT: vmovd %xmm2, (%rcx)
359 ; AVX512BW-NEXT: vmovd %xmm3, (%r8)
360 ; AVX512BW-NEXT: retq
361 ;
362 ; AVX512BW-FCP-LABEL: load_i8_stride4_vf4:
363 ; AVX512BW-FCP: # %bb.0:
364 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
365 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
366 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
367 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
368 ; AVX512BW-FCP-NEXT: vpmovdb %xmm0, (%rsi)
369 ; AVX512BW-FCP-NEXT: vmovd %xmm1, (%rdx)
370 ; AVX512BW-FCP-NEXT: vmovd %xmm2, (%rcx)
371 ; AVX512BW-FCP-NEXT: vmovd %xmm3, (%r8)
372 ; AVX512BW-FCP-NEXT: retq
373 ;
374 ; AVX512DQ-BW-LABEL: load_i8_stride4_vf4:
375 ; AVX512DQ-BW: # %bb.0:
376 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm0
377 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
378 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
379 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
380 ; AVX512DQ-BW-NEXT: vpmovdb %xmm0, (%rsi)
381 ; AVX512DQ-BW-NEXT: vmovd %xmm1, (%rdx)
382 ; AVX512DQ-BW-NEXT: vmovd %xmm2, (%rcx)
383 ; AVX512DQ-BW-NEXT: vmovd %xmm3, (%r8)
384 ; AVX512DQ-BW-NEXT: retq
385 ;
386 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride4_vf4:
387 ; AVX512DQ-BW-FCP: # %bb.0:
388 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
389 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
390 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
391 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
392 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %xmm0, (%rsi)
393 ; AVX512DQ-BW-FCP-NEXT: vmovd %xmm1, (%rdx)
394 ; AVX512DQ-BW-FCP-NEXT: vmovd %xmm2, (%rcx)
395 ; AVX512DQ-BW-FCP-NEXT: vmovd %xmm3, (%r8)
396 ; AVX512DQ-BW-FCP-NEXT: retq
397 %wide.vec = load <16 x i8>, ptr %in.vec, align 64
398 %strided.vec0 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
399 %strided.vec1 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
400 %strided.vec2 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
401 %strided.vec3 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
402 store <4 x i8> %strided.vec0, ptr %out.vec0, align 64
403 store <4 x i8> %strided.vec1, ptr %out.vec1, align 64
404 store <4 x i8> %strided.vec2, ptr %out.vec2, align 64
405 store <4 x i8> %strided.vec3, ptr %out.vec3, align 64
406 ret void
407 }
409 define void @load_i8_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
410 ; SSE-LABEL: load_i8_stride4_vf8:
411 ; SSE: # %bb.0:
412 ; SSE-NEXT: movdqa (%rdi), %xmm3
413 ; SSE-NEXT: movdqa 16(%rdi), %xmm4
414 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0]
415 ; SSE-NEXT: movdqa %xmm4, %xmm1
416 ; SSE-NEXT: movdqa %xmm4, %xmm2
417 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm4[3,1,2,3,4,5,6,7]
418 ; SSE-NEXT: pand %xmm0, %xmm4
419 ; SSE-NEXT: pand %xmm3, %xmm0
420 ; SSE-NEXT: packuswb %xmm4, %xmm0
421 ; SSE-NEXT: packuswb %xmm0, %xmm0
422 ; SSE-NEXT: pxor %xmm6, %xmm6
423 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
424 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,2,2,3]
425 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
426 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
427 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,2,2,3]
428 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
429 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
430 ; SSE-NEXT: movdqa %xmm3, %xmm4
431 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[3,1,2,3,4,5,6,7]
432 ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
433 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm3[0,2,2,3]
434 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7]
435 ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
436 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,2,2,3]
437 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
438 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1]
439 ; SSE-NEXT: packuswb %xmm7, %xmm6
440 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,2,3]
441 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,5,6,7]
442 ; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
443 ; SSE-NEXT: pand %xmm7, %xmm5
444 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
445 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
446 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,5,6,7]
447 ; SSE-NEXT: pand %xmm7, %xmm8
448 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,2,2,3]
449 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,3,2,4,5,6,7]
450 ; SSE-NEXT: packuswb %xmm5, %xmm7
451 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[0,3,2,3]
452 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
453 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
454 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
455 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
456 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
457 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[3,1,2,3]
458 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
459 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[3,1,2,3]
460 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
461 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
462 ; SSE-NEXT: packuswb %xmm2, %xmm3
463 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,3,2,3]
464 ; SSE-NEXT: movq %xmm0, (%rsi)
465 ; SSE-NEXT: movq %xmm6, (%rdx)
466 ; SSE-NEXT: movq %xmm5, (%rcx)
467 ; SSE-NEXT: movq %xmm1, (%r8)
468 ; SSE-NEXT: retq
469 ;
470 ; AVX-LABEL: load_i8_stride4_vf8:
471 ; AVX: # %bb.0:
472 ; AVX-NEXT: vmovd {{.*#+}} xmm0 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
473 ; AVX-NEXT: vmovdqa (%rdi), %xmm1
474 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm2
475 ; AVX-NEXT: vpshufb %xmm0, %xmm2, %xmm3
476 ; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm0
477 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
478 ; AVX-NEXT: vmovd {{.*#+}} xmm3 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
479 ; AVX-NEXT: vpshufb %xmm3, %xmm2, %xmm4
480 ; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm3
481 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
482 ; AVX-NEXT: vmovd {{.*#+}} xmm4 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
483 ; AVX-NEXT: vpshufb %xmm4, %xmm2, %xmm5
484 ; AVX-NEXT: vpshufb %xmm4, %xmm1, %xmm4
485 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
486 ; AVX-NEXT: vmovd {{.*#+}} xmm5 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
487 ; AVX-NEXT: vpshufb %xmm5, %xmm2, %xmm2
488 ; AVX-NEXT: vpshufb %xmm5, %xmm1, %xmm1
489 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
490 ; AVX-NEXT: vmovq %xmm0, (%rsi)
491 ; AVX-NEXT: vmovq %xmm3, (%rdx)
492 ; AVX-NEXT: vmovq %xmm4, (%rcx)
493 ; AVX-NEXT: vmovq %xmm1, (%r8)
494 ; AVX-NEXT: retq
495 ;
496 ; AVX2-LABEL: load_i8_stride4_vf8:
497 ; AVX2: # %bb.0:
498 ; AVX2-NEXT: vmovd {{.*#+}} xmm0 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
499 ; AVX2-NEXT: vmovdqa (%rdi), %xmm1
500 ; AVX2-NEXT: vmovdqa 16(%rdi), %xmm2
501 ; AVX2-NEXT: vpshufb %xmm0, %xmm2, %xmm3
502 ; AVX2-NEXT: vpshufb %xmm0, %xmm1, %xmm0
503 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
504 ; AVX2-NEXT: vmovd {{.*#+}} xmm3 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
505 ; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm4
506 ; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm3
507 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
508 ; AVX2-NEXT: vmovd {{.*#+}} xmm4 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
509 ; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm5
510 ; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm4
511 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
512 ; AVX2-NEXT: vmovd {{.*#+}} xmm5 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
513 ; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
514 ; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
515 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
516 ; AVX2-NEXT: vmovq %xmm0, (%rsi)
517 ; AVX2-NEXT: vmovq %xmm3, (%rdx)
518 ; AVX2-NEXT: vmovq %xmm4, (%rcx)
519 ; AVX2-NEXT: vmovq %xmm1, (%r8)
520 ; AVX2-NEXT: retq
521 ;
522 ; AVX2-FP-LABEL: load_i8_stride4_vf8:
523 ; AVX2-FP: # %bb.0:
524 ; AVX2-FP-NEXT: vmovd {{.*#+}} xmm0 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
525 ; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm1
526 ; AVX2-FP-NEXT: vmovdqa 16(%rdi), %xmm2
527 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm2, %xmm3
528 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
529 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
530 ; AVX2-FP-NEXT: vmovd {{.*#+}} xmm3 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
531 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm2, %xmm4
532 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm1, %xmm3
533 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
534 ; AVX2-FP-NEXT: vmovd {{.*#+}} xmm4 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
535 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm2, %xmm5
536 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm1, %xmm4
537 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
538 ; AVX2-FP-NEXT: vmovd {{.*#+}} xmm5 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
539 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
540 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm1, %xmm1
541 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
542 ; AVX2-FP-NEXT: vmovq %xmm0, (%rsi)
543 ; AVX2-FP-NEXT: vmovq %xmm3, (%rdx)
544 ; AVX2-FP-NEXT: vmovq %xmm4, (%rcx)
545 ; AVX2-FP-NEXT: vmovq %xmm1, (%r8)
546 ; AVX2-FP-NEXT: retq
547 ;
548 ; AVX2-FCP-LABEL: load_i8_stride4_vf8:
549 ; AVX2-FCP: # %bb.0:
550 ; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm0 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
551 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm1
552 ; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm2
553 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm3
554 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
555 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
556 ; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm3 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
557 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm2, %xmm4
558 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm3
559 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
560 ; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm4 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
561 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm5
562 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm4
563 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
564 ; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm5 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
565 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
566 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm1
567 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
568 ; AVX2-FCP-NEXT: vmovq %xmm0, (%rsi)
569 ; AVX2-FCP-NEXT: vmovq %xmm3, (%rdx)
570 ; AVX2-FCP-NEXT: vmovq %xmm4, (%rcx)
571 ; AVX2-FCP-NEXT: vmovq %xmm1, (%r8)
572 ; AVX2-FCP-NEXT: retq
573 ;
574 ; AVX512-LABEL: load_i8_stride4_vf8:
575 ; AVX512: # %bb.0:
576 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
577 ; AVX512-NEXT: vpsrld $8, %ymm0, %ymm1
578 ; AVX512-NEXT: vpsrld $16, %ymm0, %ymm2
579 ; AVX512-NEXT: vpsrld $24, %ymm0, %ymm3
580 ; AVX512-NEXT: vpmovdb %ymm0, (%rsi)
581 ; AVX512-NEXT: vpmovdb %ymm1, (%rdx)
582 ; AVX512-NEXT: vpmovdb %ymm2, (%rcx)
583 ; AVX512-NEXT: vpmovdb %ymm3, (%r8)
584 ; AVX512-NEXT: vzeroupper
585 ; AVX512-NEXT: retq
586 ;
587 ; AVX512-FCP-LABEL: load_i8_stride4_vf8:
588 ; AVX512-FCP: # %bb.0:
589 ; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm0
590 ; AVX512-FCP-NEXT: vpsrld $8, %ymm0, %ymm1
591 ; AVX512-FCP-NEXT: vpsrld $16, %ymm0, %ymm2
592 ; AVX512-FCP-NEXT: vpsrld $24, %ymm0, %ymm3
593 ; AVX512-FCP-NEXT: vpmovdb %ymm0, (%rsi)
594 ; AVX512-FCP-NEXT: vpmovdb %ymm1, (%rdx)
595 ; AVX512-FCP-NEXT: vpmovdb %ymm2, (%rcx)
596 ; AVX512-FCP-NEXT: vpmovdb %ymm3, (%r8)
597 ; AVX512-FCP-NEXT: vzeroupper
598 ; AVX512-FCP-NEXT: retq
599 ;
600 ; AVX512DQ-LABEL: load_i8_stride4_vf8:
601 ; AVX512DQ: # %bb.0:
602 ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
603 ; AVX512DQ-NEXT: vpsrld $8, %ymm0, %ymm1
604 ; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm2
605 ; AVX512DQ-NEXT: vpsrld $24, %ymm0, %ymm3
606 ; AVX512DQ-NEXT: vpmovdb %ymm0, (%rsi)
607 ; AVX512DQ-NEXT: vpmovdb %ymm1, (%rdx)
608 ; AVX512DQ-NEXT: vpmovdb %ymm2, (%rcx)
609 ; AVX512DQ-NEXT: vpmovdb %ymm3, (%r8)
610 ; AVX512DQ-NEXT: vzeroupper
611 ; AVX512DQ-NEXT: retq
612 ;
613 ; AVX512DQ-FCP-LABEL: load_i8_stride4_vf8:
614 ; AVX512DQ-FCP: # %bb.0:
615 ; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm0
616 ; AVX512DQ-FCP-NEXT: vpsrld $8, %ymm0, %ymm1
617 ; AVX512DQ-FCP-NEXT: vpsrld $16, %ymm0, %ymm2
618 ; AVX512DQ-FCP-NEXT: vpsrld $24, %ymm0, %ymm3
619 ; AVX512DQ-FCP-NEXT: vpmovdb %ymm0, (%rsi)
620 ; AVX512DQ-FCP-NEXT: vpmovdb %ymm1, (%rdx)
621 ; AVX512DQ-FCP-NEXT: vpmovdb %ymm2, (%rcx)
622 ; AVX512DQ-FCP-NEXT: vpmovdb %ymm3, (%r8)
623 ; AVX512DQ-FCP-NEXT: vzeroupper
624 ; AVX512DQ-FCP-NEXT: retq
625 ;
626 ; AVX512BW-LABEL: load_i8_stride4_vf8:
627 ; AVX512BW: # %bb.0:
628 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
629 ; AVX512BW-NEXT: vpsrld $8, %ymm0, %ymm1
630 ; AVX512BW-NEXT: vpsrld $16, %ymm0, %ymm2
631 ; AVX512BW-NEXT: vpsrld $24, %ymm0, %ymm3
632 ; AVX512BW-NEXT: vpmovdb %ymm0, (%rsi)
633 ; AVX512BW-NEXT: vpmovdb %ymm1, (%rdx)
634 ; AVX512BW-NEXT: vpmovdb %ymm2, (%rcx)
635 ; AVX512BW-NEXT: vpmovdb %ymm3, (%r8)
636 ; AVX512BW-NEXT: vzeroupper
637 ; AVX512BW-NEXT: retq
638 ;
639 ; AVX512BW-FCP-LABEL: load_i8_stride4_vf8:
640 ; AVX512BW-FCP: # %bb.0:
641 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
642 ; AVX512BW-FCP-NEXT: vpsrld $8, %ymm0, %ymm1
643 ; AVX512BW-FCP-NEXT: vpsrld $16, %ymm0, %ymm2
644 ; AVX512BW-FCP-NEXT: vpsrld $24, %ymm0, %ymm3
645 ; AVX512BW-FCP-NEXT: vpmovdb %ymm0, (%rsi)
646 ; AVX512BW-FCP-NEXT: vpmovdb %ymm1, (%rdx)
647 ; AVX512BW-FCP-NEXT: vpmovdb %ymm2, (%rcx)
648 ; AVX512BW-FCP-NEXT: vpmovdb %ymm3, (%r8)
649 ; AVX512BW-FCP-NEXT: vzeroupper
650 ; AVX512BW-FCP-NEXT: retq
651 ;
652 ; AVX512DQ-BW-LABEL: load_i8_stride4_vf8:
653 ; AVX512DQ-BW: # %bb.0:
654 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm0
655 ; AVX512DQ-BW-NEXT: vpsrld $8, %ymm0, %ymm1
656 ; AVX512DQ-BW-NEXT: vpsrld $16, %ymm0, %ymm2
657 ; AVX512DQ-BW-NEXT: vpsrld $24, %ymm0, %ymm3
658 ; AVX512DQ-BW-NEXT: vpmovdb %ymm0, (%rsi)
659 ; AVX512DQ-BW-NEXT: vpmovdb %ymm1, (%rdx)
660 ; AVX512DQ-BW-NEXT: vpmovdb %ymm2, (%rcx)
661 ; AVX512DQ-BW-NEXT: vpmovdb %ymm3, (%r8)
662 ; AVX512DQ-BW-NEXT: vzeroupper
663 ; AVX512DQ-BW-NEXT: retq
664 ;
665 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride4_vf8:
666 ; AVX512DQ-BW-FCP: # %bb.0:
667 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
668 ; AVX512DQ-BW-FCP-NEXT: vpsrld $8, %ymm0, %ymm1
669 ; AVX512DQ-BW-FCP-NEXT: vpsrld $16, %ymm0, %ymm2
670 ; AVX512DQ-BW-FCP-NEXT: vpsrld $24, %ymm0, %ymm3
671 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %ymm0, (%rsi)
672 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %ymm1, (%rdx)
673 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %ymm2, (%rcx)
674 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %ymm3, (%r8)
675 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
676 ; AVX512DQ-BW-FCP-NEXT: retq
677 %wide.vec = load <32 x i8>, ptr %in.vec, align 64
678 %strided.vec0 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
679 %strided.vec1 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
680 %strided.vec2 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
681 %strided.vec3 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
682 store <8 x i8> %strided.vec0, ptr %out.vec0, align 64
683 store <8 x i8> %strided.vec1, ptr %out.vec1, align 64
684 store <8 x i8> %strided.vec2, ptr %out.vec2, align 64
685 store <8 x i8> %strided.vec3, ptr %out.vec3, align 64
686 ret void
687 }
689 define void @load_i8_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
690 ; SSE-LABEL: load_i8_stride4_vf16:
691 ; SSE: # %bb.0:
692 ; SSE-NEXT: movdqa (%rdi), %xmm1
693 ; SSE-NEXT: movdqa 16(%rdi), %xmm3
694 ; SSE-NEXT: movdqa 32(%rdi), %xmm9
695 ; SSE-NEXT: movdqa 48(%rdi), %xmm10
696 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0]
697 ; SSE-NEXT: movdqa %xmm10, %xmm0
698 ; SSE-NEXT: movdqa %xmm10, %xmm4
699 ; SSE-NEXT: movdqa %xmm10, %xmm7
700 ; SSE-NEXT: pand %xmm2, %xmm7
701 ; SSE-NEXT: movdqa %xmm9, %xmm5
702 ; SSE-NEXT: movdqa %xmm9, %xmm6
703 ; SSE-NEXT: movdqa %xmm9, %xmm11
704 ; SSE-NEXT: pand %xmm2, %xmm11
705 ; SSE-NEXT: packuswb %xmm7, %xmm11
706 ; SSE-NEXT: movdqa %xmm3, %xmm7
707 ; SSE-NEXT: movdqa %xmm3, %xmm8
708 ; SSE-NEXT: movdqa %xmm3, %xmm12
709 ; SSE-NEXT: pand %xmm2, %xmm12
710 ; SSE-NEXT: pand %xmm1, %xmm2
711 ; SSE-NEXT: packuswb %xmm12, %xmm2
712 ; SSE-NEXT: packuswb %xmm11, %xmm2
713 ; SSE-NEXT: pxor %xmm11, %xmm11
714 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm11[8],xmm0[9],xmm11[9],xmm0[10],xmm11[10],xmm0[11],xmm11[11],xmm0[12],xmm11[12],xmm0[13],xmm11[13],xmm0[14],xmm11[14],xmm0[15],xmm11[15]
715 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
716 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm0[0,2,2,3]
717 ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[0,1,1,3,4,5,6,7]
718 ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3],xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7]
719 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm4[0,2,2,3]
720 ; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[0,1,1,3,4,5,6,7]
721 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
722 ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm11[8],xmm5[9],xmm11[9],xmm5[10],xmm11[10],xmm5[11],xmm11[11],xmm5[12],xmm11[12],xmm5[13],xmm11[13],xmm5[14],xmm11[14],xmm5[15],xmm11[15]
723 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm5[0,2,2,3]
724 ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[1,3,2,3,4,5,6,7]
725 ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
726 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm6[0,2,2,3]
727 ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm14[1,3,2,3,4,5,6,7]
728 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm12[0],xmm14[1],xmm12[1]
729 ; SSE-NEXT: packuswb %xmm13, %xmm14
730 ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm11[8],xmm7[9],xmm11[9],xmm7[10],xmm11[10],xmm7[11],xmm11[11],xmm7[12],xmm11[12],xmm7[13],xmm11[13],xmm7[14],xmm11[14],xmm7[15],xmm11[15]
731 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm7[0,2,2,3]
732 ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[0,1,1,3,4,5,6,7]
733 ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3],xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
734 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm8[0,2,2,3]
735 ; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm13[0,1,1,3,4,5,6,7]
736 ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm12[0],xmm15[1],xmm12[1]
737 ; SSE-NEXT: movdqa %xmm1, %xmm12
738 ; SSE-NEXT: movdqa %xmm1, %xmm13
739 ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm11[8],xmm13[9],xmm11[9],xmm13[10],xmm11[10],xmm13[11],xmm11[11],xmm13[12],xmm11[12],xmm13[13],xmm11[13],xmm13[14],xmm11[14],xmm13[15],xmm11[15]
740 ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
741 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm13[0,2,2,3]
742 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[1,3,2,3,4,5,6,7]
743 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm12[0,2,2,3]
744 ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[1,3,2,3,4,5,6,7]
745 ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
746 ; SSE-NEXT: packuswb %xmm15, %xmm11
747 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[3,1,2,3,4,5,6,7]
748 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7]
749 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,3],xmm14[0,3]
750 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
751 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255]
752 ; SSE-NEXT: pand %xmm10, %xmm0
753 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
754 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
755 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,7]
756 ; SSE-NEXT: pand %xmm10, %xmm9
757 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,2,2,3]
758 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,0,3,2,4,5,6,7]
759 ; SSE-NEXT: packuswb %xmm0, %xmm9
760 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[3,1,2,3,4,5,6,7]
761 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
762 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
763 ; SSE-NEXT: pand %xmm10, %xmm0
764 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7]
765 ; SSE-NEXT: pand %xmm10, %xmm1
766 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
767 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
768 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
769 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
770 ; SSE-NEXT: packuswb %xmm0, %xmm1
771 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm9[0,3]
772 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
773 ; SSE-NEXT: # xmm0 = mem[3,1,2,3]
774 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,1,4,5,6,7]
775 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[3,1,2,3]
776 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
777 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
778 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,1,2,3]
779 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
780 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[3,1,2,3]
781 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
782 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
783 ; SSE-NEXT: packuswb %xmm3, %xmm4
784 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,1,2,3]
785 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,1,4,5,6,7]
786 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[3,1,2,3]
787 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
788 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
789 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[3,1,2,3]
790 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
791 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm12[3,1,2,3]
792 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
793 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
794 ; SSE-NEXT: packuswb %xmm3, %xmm5
795 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm4[0,3]
796 ; SSE-NEXT: movdqa %xmm2, (%rsi)
797 ; SSE-NEXT: movaps %xmm11, (%rdx)
798 ; SSE-NEXT: movaps %xmm1, (%rcx)
799 ; SSE-NEXT: movaps %xmm5, (%r8)
800 ; SSE-NEXT: retq
801 ;
802 ; AVX-LABEL: load_i8_stride4_vf16:
803 ; AVX: # %bb.0:
804 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
805 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
806 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
807 ; AVX-NEXT: vmovdqa 32(%rdi), %xmm3
808 ; AVX-NEXT: vmovdqa 48(%rdi), %xmm4
809 ; AVX-NEXT: vpshufb %xmm2, %xmm4, %xmm5
810 ; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
811 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
812 ; AVX-NEXT: vmovd {{.*#+}} xmm5 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
813 ; AVX-NEXT: vpshufb %xmm5, %xmm1, %xmm6
814 ; AVX-NEXT: vpshufb %xmm5, %xmm0, %xmm5
815 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
816 ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
817 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
818 ; AVX-NEXT: vpshufb %xmm5, %xmm4, %xmm6
819 ; AVX-NEXT: vpshufb %xmm5, %xmm3, %xmm5
820 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
821 ; AVX-NEXT: vmovd {{.*#+}} xmm6 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
822 ; AVX-NEXT: vpshufb %xmm6, %xmm1, %xmm7
823 ; AVX-NEXT: vpshufb %xmm6, %xmm0, %xmm6
824 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
825 ; AVX-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
826 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
827 ; AVX-NEXT: vpshufb %xmm6, %xmm4, %xmm7
828 ; AVX-NEXT: vpshufb %xmm6, %xmm3, %xmm6
829 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
830 ; AVX-NEXT: vmovd {{.*#+}} xmm7 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
831 ; AVX-NEXT: vpshufb %xmm7, %xmm1, %xmm8
832 ; AVX-NEXT: vpshufb %xmm7, %xmm0, %xmm7
833 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
834 ; AVX-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4,5,6,7]
835 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
836 ; AVX-NEXT: vpshufb %xmm7, %xmm4, %xmm4
837 ; AVX-NEXT: vpshufb %xmm7, %xmm3, %xmm3
838 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
839 ; AVX-NEXT: vmovd {{.*#+}} xmm4 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
840 ; AVX-NEXT: vpshufb %xmm4, %xmm1, %xmm1
841 ; AVX-NEXT: vpshufb %xmm4, %xmm0, %xmm0
842 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
843 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
844 ; AVX-NEXT: vmovdqa %xmm2, (%rsi)
845 ; AVX-NEXT: vmovdqa %xmm5, (%rdx)
846 ; AVX-NEXT: vmovdqa %xmm6, (%rcx)
847 ; AVX-NEXT: vmovdqa %xmm0, (%r8)
848 ; AVX-NEXT: retq
849 ;
850 ; AVX2-LABEL: load_i8_stride4_vf16:
851 ; AVX2: # %bb.0:
852 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
853 ; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1
854 ; AVX2-NEXT: vmovdqa 32(%rdi), %xmm2
855 ; AVX2-NEXT: vmovdqa 48(%rdi), %xmm3
856 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
857 ; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm5
858 ; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm4
859 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
860 ; AVX2-NEXT: vmovd {{.*#+}} xmm5 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
861 ; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm6
862 ; AVX2-NEXT: vpshufb %xmm5, %xmm0, %xmm5
863 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
864 ; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
865 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
866 ; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm6
867 ; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm5
868 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
869 ; AVX2-NEXT: vmovd {{.*#+}} xmm6 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
870 ; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm7
871 ; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm6
872 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
873 ; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
874 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
875 ; AVX2-NEXT: vpshufb %xmm6, %xmm3, %xmm7
876 ; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm6
877 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
878 ; AVX2-NEXT: vmovd {{.*#+}} xmm7 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
879 ; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm8
880 ; AVX2-NEXT: vpshufb %xmm7, %xmm0, %xmm7
881 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
882 ; AVX2-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
883 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
884 ; AVX2-NEXT: vpshufb %xmm7, %xmm3, %xmm3
885 ; AVX2-NEXT: vpshufb %xmm7, %xmm2, %xmm2
886 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
887 ; AVX2-NEXT: vmovd {{.*#+}} xmm3 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
888 ; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
889 ; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
890 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
891 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
892 ; AVX2-NEXT: vmovdqa %xmm4, (%rsi)
893 ; AVX2-NEXT: vmovdqa %xmm5, (%rdx)
894 ; AVX2-NEXT: vmovdqa %xmm6, (%rcx)
895 ; AVX2-NEXT: vmovdqa %xmm0, (%r8)
896 ; AVX2-NEXT: retq
897 ;
898 ; AVX2-FP-LABEL: load_i8_stride4_vf16:
899 ; AVX2-FP: # %bb.0:
900 ; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm0
901 ; AVX2-FP-NEXT: vmovdqa 16(%rdi), %xmm1
902 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm2
903 ; AVX2-FP-NEXT: vmovdqa 48(%rdi), %xmm3
904 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm4 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
905 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm3, %xmm5
906 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm2, %xmm4
907 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
908 ; AVX2-FP-NEXT: vmovd {{.*#+}} xmm5 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
909 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm1, %xmm6
910 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
911 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
912 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
913 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
914 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm3, %xmm6
915 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm2, %xmm5
916 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
917 ; AVX2-FP-NEXT: vmovd {{.*#+}} xmm6 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
918 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm1, %xmm7
919 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm0, %xmm6
920 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
921 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
922 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
923 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm3, %xmm7
924 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm2, %xmm6
925 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
926 ; AVX2-FP-NEXT: vmovd {{.*#+}} xmm7 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
927 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm1, %xmm8
928 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm0, %xmm7
929 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
930 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
931 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
932 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm3, %xmm3
933 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
934 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
935 ; AVX2-FP-NEXT: vmovd {{.*#+}} xmm3 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
936 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
937 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
938 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
939 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
940 ; AVX2-FP-NEXT: vmovdqa %xmm4, (%rsi)
941 ; AVX2-FP-NEXT: vmovdqa %xmm5, (%rdx)
942 ; AVX2-FP-NEXT: vmovdqa %xmm6, (%rcx)
943 ; AVX2-FP-NEXT: vmovdqa %xmm0, (%r8)
944 ; AVX2-FP-NEXT: retq
945 ;
946 ; AVX2-FCP-LABEL: load_i8_stride4_vf16:
947 ; AVX2-FCP: # %bb.0:
948 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm0
949 ; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
950 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
951 ; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm3
952 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm4 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
953 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm3, %xmm5
954 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm4
955 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
956 ; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm5 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
957 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm6
958 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
959 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
960 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
961 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
962 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm6
963 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm5
964 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
965 ; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm6 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
966 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm7
967 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm0, %xmm6
968 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
969 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
970 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
971 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm7
972 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm6
973 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
974 ; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm7 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
975 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm8
976 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm7
977 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
978 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
979 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
980 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm3
981 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm2, %xmm2
982 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
983 ; AVX2-FCP-NEXT: vmovd {{.*#+}} xmm3 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
984 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
985 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
986 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
987 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
988 ; AVX2-FCP-NEXT: vmovdqa %xmm4, (%rsi)
989 ; AVX2-FCP-NEXT: vmovdqa %xmm5, (%rdx)
990 ; AVX2-FCP-NEXT: vmovdqa %xmm6, (%rcx)
991 ; AVX2-FCP-NEXT: vmovdqa %xmm0, (%r8)
992 ; AVX2-FCP-NEXT: retq
993 ;
994 ; AVX512-LABEL: load_i8_stride4_vf16:
995 ; AVX512: # %bb.0:
996 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
997 ; AVX512-NEXT: vpsrld $8, %zmm0, %zmm1
998 ; AVX512-NEXT: vpsrld $16, %zmm0, %zmm2
999 ; AVX512-NEXT: vpsrld $24, %zmm0, %zmm3
1000 ; AVX512-NEXT: vpmovdb %zmm0, (%rsi)
1001 ; AVX512-NEXT: vpmovdb %zmm1, (%rdx)
1002 ; AVX512-NEXT: vpmovdb %zmm2, (%rcx)
1003 ; AVX512-NEXT: vpmovdb %zmm3, (%r8)
1004 ; AVX512-NEXT: vzeroupper
1005 ; AVX512-NEXT: retq
1006 ;
1007 ; AVX512-FCP-LABEL: load_i8_stride4_vf16:
1008 ; AVX512-FCP: # %bb.0:
1009 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
1010 ; AVX512-FCP-NEXT: vpsrld $8, %zmm0, %zmm1
1011 ; AVX512-FCP-NEXT: vpsrld $16, %zmm0, %zmm2
1012 ; AVX512-FCP-NEXT: vpsrld $24, %zmm0, %zmm3
1013 ; AVX512-FCP-NEXT: vpmovdb %zmm0, (%rsi)
1014 ; AVX512-FCP-NEXT: vpmovdb %zmm1, (%rdx)
1015 ; AVX512-FCP-NEXT: vpmovdb %zmm2, (%rcx)
1016 ; AVX512-FCP-NEXT: vpmovdb %zmm3, (%r8)
1017 ; AVX512-FCP-NEXT: vzeroupper
1018 ; AVX512-FCP-NEXT: retq
1019 ;
1020 ; AVX512DQ-LABEL: load_i8_stride4_vf16:
1021 ; AVX512DQ: # %bb.0:
1022 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
1023 ; AVX512DQ-NEXT: vpsrld $8, %zmm0, %zmm1
1024 ; AVX512DQ-NEXT: vpsrld $16, %zmm0, %zmm2
1025 ; AVX512DQ-NEXT: vpsrld $24, %zmm0, %zmm3
1026 ; AVX512DQ-NEXT: vpmovdb %zmm0, (%rsi)
1027 ; AVX512DQ-NEXT: vpmovdb %zmm1, (%rdx)
1028 ; AVX512DQ-NEXT: vpmovdb %zmm2, (%rcx)
1029 ; AVX512DQ-NEXT: vpmovdb %zmm3, (%r8)
1030 ; AVX512DQ-NEXT: vzeroupper
1031 ; AVX512DQ-NEXT: retq
1032 ;
1033 ; AVX512DQ-FCP-LABEL: load_i8_stride4_vf16:
1034 ; AVX512DQ-FCP: # %bb.0:
1035 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
1036 ; AVX512DQ-FCP-NEXT: vpsrld $8, %zmm0, %zmm1
1037 ; AVX512DQ-FCP-NEXT: vpsrld $16, %zmm0, %zmm2
1038 ; AVX512DQ-FCP-NEXT: vpsrld $24, %zmm0, %zmm3
1039 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm0, (%rsi)
1040 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm1, (%rdx)
1041 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm2, (%rcx)
1042 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm3, (%r8)
1043 ; AVX512DQ-FCP-NEXT: vzeroupper
1044 ; AVX512DQ-FCP-NEXT: retq
1045 ;
1046 ; AVX512BW-LABEL: load_i8_stride4_vf16:
1047 ; AVX512BW: # %bb.0:
1048 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
1049 ; AVX512BW-NEXT: vpsrld $8, %zmm0, %zmm1
1050 ; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm2
1051 ; AVX512BW-NEXT: vpsrld $24, %zmm0, %zmm3
1052 ; AVX512BW-NEXT: vpmovdb %zmm0, (%rsi)
1053 ; AVX512BW-NEXT: vpmovdb %zmm1, (%rdx)
1054 ; AVX512BW-NEXT: vpmovdb %zmm2, (%rcx)
1055 ; AVX512BW-NEXT: vpmovdb %zmm3, (%r8)
1056 ; AVX512BW-NEXT: vzeroupper
1057 ; AVX512BW-NEXT: retq
1058 ;
1059 ; AVX512BW-FCP-LABEL: load_i8_stride4_vf16:
1060 ; AVX512BW-FCP: # %bb.0:
1061 ; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
1062 ; AVX512BW-FCP-NEXT: vpsrld $8, %zmm0, %zmm1
1063 ; AVX512BW-FCP-NEXT: vpsrld $16, %zmm0, %zmm2
1064 ; AVX512BW-FCP-NEXT: vpsrld $24, %zmm0, %zmm3
1065 ; AVX512BW-FCP-NEXT: vpmovdb %zmm0, (%rsi)
1066 ; AVX512BW-FCP-NEXT: vpmovdb %zmm1, (%rdx)
1067 ; AVX512BW-FCP-NEXT: vpmovdb %zmm2, (%rcx)
1068 ; AVX512BW-FCP-NEXT: vpmovdb %zmm3, (%r8)
1069 ; AVX512BW-FCP-NEXT: vzeroupper
1070 ; AVX512BW-FCP-NEXT: retq
1071 ;
1072 ; AVX512DQ-BW-LABEL: load_i8_stride4_vf16:
1073 ; AVX512DQ-BW: # %bb.0:
1074 ; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
1075 ; AVX512DQ-BW-NEXT: vpsrld $8, %zmm0, %zmm1
1076 ; AVX512DQ-BW-NEXT: vpsrld $16, %zmm0, %zmm2
1077 ; AVX512DQ-BW-NEXT: vpsrld $24, %zmm0, %zmm3
1078 ; AVX512DQ-BW-NEXT: vpmovdb %zmm0, (%rsi)
1079 ; AVX512DQ-BW-NEXT: vpmovdb %zmm1, (%rdx)
1080 ; AVX512DQ-BW-NEXT: vpmovdb %zmm2, (%rcx)
1081 ; AVX512DQ-BW-NEXT: vpmovdb %zmm3, (%r8)
1082 ; AVX512DQ-BW-NEXT: vzeroupper
1083 ; AVX512DQ-BW-NEXT: retq
1084 ;
1085 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride4_vf16:
1086 ; AVX512DQ-BW-FCP: # %bb.0:
1087 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
1088 ; AVX512DQ-BW-FCP-NEXT: vpsrld $8, %zmm0, %zmm1
1089 ; AVX512DQ-BW-FCP-NEXT: vpsrld $16, %zmm0, %zmm2
1090 ; AVX512DQ-BW-FCP-NEXT: vpsrld $24, %zmm0, %zmm3
1091 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %zmm0, (%rsi)
1092 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %zmm1, (%rdx)
1093 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %zmm2, (%rcx)
1094 ; AVX512DQ-BW-FCP-NEXT: vpmovdb %zmm3, (%r8)
1095 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
1096 ; AVX512DQ-BW-FCP-NEXT: retq
1097 %wide.vec = load <64 x i8>, ptr %in.vec, align 64
1098 %strided.vec0 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
1099 %strided.vec1 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
1100 %strided.vec2 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
1101 %strided.vec3 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
1102 store <16 x i8> %strided.vec0, ptr %out.vec0, align 64
1103 store <16 x i8> %strided.vec1, ptr %out.vec1, align 64
1104 store <16 x i8> %strided.vec2, ptr %out.vec2, align 64
1105 store <16 x i8> %strided.vec3, ptr %out.vec3, align 64
1106 ret void
1107 }
1109 define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
1110 ; SSE-LABEL: load_i8_stride4_vf32:
1111 ; SSE: # %bb.0:
1112 ; SSE-NEXT: subq $136, %rsp
1113 ; SSE-NEXT: movdqa 64(%rdi), %xmm4
1114 ; SSE-NEXT: movdqa 80(%rdi), %xmm9
1115 ; SSE-NEXT: movdqa 96(%rdi), %xmm11
1116 ; SSE-NEXT: movdqa 112(%rdi), %xmm12
1117 ; SSE-NEXT: movdqa (%rdi), %xmm13
1118 ; SSE-NEXT: movdqa 16(%rdi), %xmm14
1119 ; SSE-NEXT: movdqa 32(%rdi), %xmm2
1120 ; SSE-NEXT: movdqa 48(%rdi), %xmm10
1121 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,255,0,255,0,255,0]
1122 ; SSE-NEXT: movdqa %xmm10, %xmm3
1123 ; SSE-NEXT: movdqa %xmm10, %xmm5
1124 ; SSE-NEXT: movdqa %xmm10, %xmm0
1125 ; SSE-NEXT: pand %xmm8, %xmm0
1126 ; SSE-NEXT: movdqa %xmm2, %xmm6
1127 ; SSE-NEXT: movdqa %xmm2, %xmm1
1128 ; SSE-NEXT: movdqa %xmm2, %xmm15
1129 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1130 ; SSE-NEXT: pand %xmm8, %xmm1
1131 ; SSE-NEXT: packuswb %xmm0, %xmm1
1132 ; SSE-NEXT: movdqa %xmm14, %xmm0
1133 ; SSE-NEXT: pand %xmm8, %xmm0
1134 ; SSE-NEXT: movdqa %xmm13, %xmm7
1135 ; SSE-NEXT: pand %xmm8, %xmm7
1136 ; SSE-NEXT: packuswb %xmm0, %xmm7
1137 ; SSE-NEXT: packuswb %xmm1, %xmm7
1138 ; SSE-NEXT: movdqa %xmm12, %xmm0
1139 ; SSE-NEXT: pand %xmm8, %xmm0
1140 ; SSE-NEXT: movdqa %xmm11, %xmm1
1141 ; SSE-NEXT: pand %xmm8, %xmm1
1142 ; SSE-NEXT: packuswb %xmm0, %xmm1
1143 ; SSE-NEXT: movdqa %xmm9, %xmm0
1144 ; SSE-NEXT: pand %xmm8, %xmm0
1145 ; SSE-NEXT: pand %xmm4, %xmm8
1146 ; SSE-NEXT: packuswb %xmm0, %xmm8
1147 ; SSE-NEXT: packuswb %xmm1, %xmm8
1148 ; SSE-NEXT: pxor %xmm2, %xmm2
1149 ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
1150 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1151 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
1152 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
1153 ; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
1154 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1155 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,2,2,3]
1156 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
1157 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
1158 ; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
1159 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1160 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,2,2,3]
1161 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
1162 ; SSE-NEXT: movdqa %xmm15, %xmm1
1163 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
1164 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1165 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3]
1166 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
1167 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
1168 ; SSE-NEXT: packuswb %xmm3, %xmm5
1169 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1170 ; SSE-NEXT: movdqa %xmm14, %xmm0
1171 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
1172 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1173 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
1174 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
1175 ; SSE-NEXT: movdqa %xmm14, %xmm1
1176 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
1177 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1178 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,2,2,3]
1179 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
1180 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
1181 ; SSE-NEXT: movdqa %xmm13, %xmm14
1182 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1183 ; SSE-NEXT: movdqa %xmm13, %xmm0
1184 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
1185 ; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
1186 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
1187 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
1188 ; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm2[0],xmm14[1],xmm2[1],xmm14[2],xmm2[2],xmm14[3],xmm2[3],xmm14[4],xmm2[4],xmm14[5],xmm2[5],xmm14[6],xmm2[6],xmm14[7],xmm2[7]
1189 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm14[0,2,2,3]
1190 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[1,3,2,3,4,5,6,7]
1191 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1192 ; SSE-NEXT: packuswb %xmm3, %xmm1
1193 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm5[0,3]
1194 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1195 ; SSE-NEXT: movdqa %xmm12, %xmm15
1196 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1197 ; SSE-NEXT: movdqa %xmm12, %xmm0
1198 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
1199 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1200 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
1201 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
1202 ; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3],xmm15[4],xmm2[4],xmm15[5],xmm2[5],xmm15[6],xmm2[6],xmm15[7],xmm2[7]
1203 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm15[0,2,2,3]
1204 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
1205 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
1206 ; SSE-NEXT: movdqa %xmm11, %xmm12
1207 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1208 ; SSE-NEXT: movdqa %xmm11, %xmm13
1209 ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm2[8],xmm13[9],xmm2[9],xmm13[10],xmm2[10],xmm13[11],xmm2[11],xmm13[12],xmm2[12],xmm13[13],xmm2[13],xmm13[14],xmm2[14],xmm13[15],xmm2[15]
1210 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,2,2,3]
1211 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[1,3,2,3,4,5,6,7]
1212 ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3],xmm12[4],xmm2[4],xmm12[5],xmm2[5],xmm12[6],xmm2[6],xmm12[7],xmm2[7]
1213 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm12[0,2,2,3]
1214 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[1,3,2,3,4,5,6,7]
1215 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
1216 ; SSE-NEXT: packuswb %xmm3, %xmm0
1217 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1218 ; SSE-NEXT: movdqa %xmm9, %xmm11
1219 ; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm2[8],xmm11[9],xmm2[9],xmm11[10],xmm2[10],xmm11[11],xmm2[11],xmm11[12],xmm2[12],xmm11[13],xmm2[13],xmm11[14],xmm2[14],xmm11[15],xmm2[15]
1220 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[0,2,2,3]
1221 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
1222 ; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm2[0],xmm9[1],xmm2[1],xmm9[2],xmm2[2],xmm9[3],xmm2[3],xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
1223 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm9[0,2,2,3]
1224 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[0,1,1,3,4,5,6,7]
1225 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
1226 ; SSE-NEXT: movdqa %xmm4, %xmm5
1227 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1228 ; SSE-NEXT: movdqa %xmm4, %xmm6
1229 ; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
1230 ; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
1231 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[0,2,2,3]
1232 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
1233 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,2,2,3]
1234 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[1,3,2,3,4,5,6,7]
1235 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
1236 ; SSE-NEXT: packuswb %xmm1, %xmm4
1237 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,3]
1238 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[3,1,2,3,4,5,6,7]
1239 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1240 ; SSE-NEXT: # xmm1 = mem[3,1,2,3,4,5,6,7]
1241 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,7,5,6,7]
1242 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
1243 ; SSE-NEXT: pand %xmm3, %xmm2
1244 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
1245 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
1246 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7]
1247 ; SSE-NEXT: pand %xmm3, %xmm1
1248 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
1249 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
1250 ; SSE-NEXT: packuswb %xmm2, %xmm1
1251 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1252 ; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
1253 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
1254 ; SSE-NEXT: # xmm10 = mem[3,1,2,3,4,5,6,7]
1255 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7]
1256 ; SSE-NEXT: pand %xmm3, %xmm2
1257 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
1258 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
1259 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,5,6,7]
1260 ; SSE-NEXT: pand %xmm3, %xmm10
1261 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,2,2,3]
1262 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[1,0,3,2,4,5,6,7]
1263 ; SSE-NEXT: packuswb %xmm2, %xmm10
1264 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,3],xmm1[0,3]
1265 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1266 ; SSE-NEXT: # xmm1 = mem[3,1,2,3,4,5,6,7]
1267 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1268 ; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
1269 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7]
1270 ; SSE-NEXT: pand %xmm3, %xmm1
1271 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
1272 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
1273 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7]
1274 ; SSE-NEXT: pand %xmm3, %xmm2
1275 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
1276 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
1277 ; SSE-NEXT: packuswb %xmm1, %xmm2
1278 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1279 ; SSE-NEXT: # xmm1 = mem[3,1,2,3,4,5,6,7]
1280 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1281 ; SSE-NEXT: # xmm0 = mem[3,1,2,3,4,5,6,7]
1282 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7]
1283 ; SSE-NEXT: pand %xmm3, %xmm1
1284 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
1285 ; SSE-NEXT: pand %xmm3, %xmm0
1286 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
1287 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
1288 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
1289 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
1290 ; SSE-NEXT: packuswb %xmm1, %xmm0
1291 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
1292 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1293 ; SSE-NEXT: # xmm1 = mem[3,1,2,3]
1294 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
1295 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1296 ; SSE-NEXT: # xmm2 = mem[3,1,2,3]
1297 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
1298 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1299 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1300 ; SSE-NEXT: # xmm1 = mem[3,1,2,3]
1301 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
1302 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
1303 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
1304 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
1305 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1306 ; SSE-NEXT: packuswb %xmm2, %xmm3
1307 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1308 ; SSE-NEXT: # xmm1 = mem[3,1,2,3]
1309 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
1310 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1311 ; SSE-NEXT: # xmm2 = mem[3,1,2,3]
1312 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
1313 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1314 ; SSE-NEXT: pshufd $231, (%rsp), %xmm1 # 16-byte Folded Reload
1315 ; SSE-NEXT: # xmm1 = mem[3,1,2,3]
1316 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
1317 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[3,1,2,3]
1318 ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm14[3,1,2,3,4,5,6,7]
1319 ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
1320 ; SSE-NEXT: packuswb %xmm2, %xmm14
1321 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm3[0,3]
1322 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1323 ; SSE-NEXT: # xmm1 = mem[3,1,2,3]
1324 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
1325 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[3,1,2,3]
1326 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
1327 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1328 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[3,1,2,3]
1329 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
1330 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[3,1,2,3]
1331 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
1332 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1333 ; SSE-NEXT: packuswb %xmm2, %xmm3
1334 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[3,1,2,3]
1335 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
1336 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[3,1,2,3]
1337 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
1338 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1339 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[3,1,2,3]
1340 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
1341 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
1342 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
1343 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
1344 ; SSE-NEXT: packuswb %xmm2, %xmm5
1345 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,3]
1346 ; SSE-NEXT: movdqa %xmm8, 16(%rsi)
1347 ; SSE-NEXT: movdqa %xmm7, (%rsi)
1348 ; SSE-NEXT: movaps %xmm4, 16(%rdx)
1349 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1350 ; SSE-NEXT: movaps %xmm1, (%rdx)
1351 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
1352 ; SSE-NEXT: movaps %xmm10, (%rcx)
1353 ; SSE-NEXT: movaps %xmm5, 16(%r8)
1354 ; SSE-NEXT: movaps %xmm14, (%r8)
1355 ; SSE-NEXT: addq $136, %rsp
1356 ; SSE-NEXT: retq
1358 ; AVX-LABEL: load_i8_stride4_vf32:
1359 ; AVX: # %bb.0:
1360 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm8 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1361 ; AVX-NEXT: vmovdqa 112(%rdi), %xmm0
1362 ; AVX-NEXT: vpshufb %xmm8, %xmm0, %xmm1
1363 ; AVX-NEXT: vmovdqa 96(%rdi), %xmm2
1364 ; AVX-NEXT: vpshufb %xmm8, %xmm2, %xmm3
1365 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
1366 ; AVX-NEXT: vmovd {{.*#+}} xmm9 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
1367 ; AVX-NEXT: vmovdqa 80(%rdi), %xmm3
1368 ; AVX-NEXT: vpshufb %xmm9, %xmm3, %xmm4
1369 ; AVX-NEXT: vmovdqa 64(%rdi), %xmm5
1370 ; AVX-NEXT: vpshufb %xmm9, %xmm5, %xmm6
1371 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
1372 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4,5,6,7]
1373 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm10
1374 ; AVX-NEXT: vmovdqa (%rdi), %xmm1
1375 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm4
1376 ; AVX-NEXT: vmovdqa 32(%rdi), %xmm6
1377 ; AVX-NEXT: vmovdqa 48(%rdi), %xmm7
1378 ; AVX-NEXT: vpshufb %xmm8, %xmm7, %xmm11
1379 ; AVX-NEXT: vpshufb %xmm8, %xmm6, %xmm8
1380 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1]
1381 ; AVX-NEXT: vpshufb %xmm9, %xmm4, %xmm11
1382 ; AVX-NEXT: vpshufb %xmm9, %xmm1, %xmm9
1383 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1]
1384 ; AVX-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4,5,6,7]
1385 ; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
1386 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1387 ; AVX-NEXT: vpshufb %xmm9, %xmm0, %xmm10
1388 ; AVX-NEXT: vpshufb %xmm9, %xmm2, %xmm11
1389 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
1390 ; AVX-NEXT: vmovd {{.*#+}} xmm11 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
1391 ; AVX-NEXT: vpshufb %xmm11, %xmm3, %xmm12
1392 ; AVX-NEXT: vpshufb %xmm11, %xmm5, %xmm13
1393 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
1394 ; AVX-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0,1,2,3],xmm10[4,5,6,7]
1395 ; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
1396 ; AVX-NEXT: vpshufb %xmm9, %xmm7, %xmm12
1397 ; AVX-NEXT: vpshufb %xmm9, %xmm6, %xmm9
1398 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm12[0],xmm9[1],xmm12[1]
1399 ; AVX-NEXT: vpshufb %xmm11, %xmm4, %xmm12
1400 ; AVX-NEXT: vpshufb %xmm11, %xmm1, %xmm11
1401 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
1402 ; AVX-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4,5,6,7]
1403 ; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
1404 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm10 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1405 ; AVX-NEXT: vpshufb %xmm10, %xmm0, %xmm11
1406 ; AVX-NEXT: vpshufb %xmm10, %xmm2, %xmm12
1407 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
1408 ; AVX-NEXT: vmovd {{.*#+}} xmm12 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
1409 ; AVX-NEXT: vpshufb %xmm12, %xmm3, %xmm13
1410 ; AVX-NEXT: vpshufb %xmm12, %xmm5, %xmm14
1411 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
1412 ; AVX-NEXT: vpblendw {{.*#+}} xmm11 = xmm13[0,1,2,3],xmm11[4,5,6,7]
1413 ; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
1414 ; AVX-NEXT: vpshufb %xmm10, %xmm7, %xmm13
1415 ; AVX-NEXT: vpshufb %xmm10, %xmm6, %xmm10
1416 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
1417 ; AVX-NEXT: vpshufb %xmm12, %xmm4, %xmm13
1418 ; AVX-NEXT: vpshufb %xmm12, %xmm1, %xmm12
1419 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
1420 ; AVX-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0,1,2,3],xmm10[4,5,6,7]
1421 ; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
1422 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm11 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1423 ; AVX-NEXT: vpshufb %xmm11, %xmm0, %xmm0
1424 ; AVX-NEXT: vpshufb %xmm11, %xmm2, %xmm2
1425 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1426 ; AVX-NEXT: vmovd {{.*#+}} xmm2 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
1427 ; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm3
1428 ; AVX-NEXT: vpshufb %xmm2, %xmm5, %xmm5
1429 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
1430 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
1431 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1432 ; AVX-NEXT: vpshufb %xmm11, %xmm7, %xmm3
1433 ; AVX-NEXT: vpshufb %xmm11, %xmm6, %xmm5
1434 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
1435 ; AVX-NEXT: vpshufb %xmm2, %xmm4, %xmm4
1436 ; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1437 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
1438 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
1439 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1440 ; AVX-NEXT: vmovaps %ymm8, (%rsi)
1441 ; AVX-NEXT: vmovaps %ymm9, (%rdx)
1442 ; AVX-NEXT: vmovaps %ymm10, (%rcx)
1443 ; AVX-NEXT: vmovaps %ymm0, (%r8)
1444 ; AVX-NEXT: vzeroupper
1445 ; AVX-NEXT: retq
1447 ; AVX2-LABEL: load_i8_stride4_vf32:
1448 ; AVX2: # %bb.0:
1449 ; AVX2-NEXT: vmovdqa 64(%rdi), %ymm0
1450 ; AVX2-NEXT: vmovdqa 96(%rdi), %ymm1
1451 ; AVX2-NEXT: vmovdqa (%rdi), %xmm2
1452 ; AVX2-NEXT: vmovdqa 16(%rdi), %xmm3
1453 ; AVX2-NEXT: vmovdqa 32(%rdi), %xmm4
1454 ; AVX2-NEXT: vmovdqa 48(%rdi), %xmm5
1455 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm6 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1456 ; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm7
1457 ; AVX2-NEXT: vpshufb %xmm6, %xmm4, %xmm6
1458 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
1459 ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1460 ; AVX2-NEXT: vpshufb %xmm7, %xmm3, %xmm8
1461 ; AVX2-NEXT: vpshufb %xmm7, %xmm2, %xmm9
1462 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
1463 ; AVX2-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm6[2,3]
1464 ; AVX2-NEXT: vpshufb %ymm7, %ymm1, %ymm9
1465 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4]
1466 ; AVX2-NEXT: vpermd %ymm9, %ymm6, %ymm9
1467 ; AVX2-NEXT: vpshufb %ymm7, %ymm0, %ymm7
1468 ; AVX2-NEXT: vpermd %ymm7, %ymm6, %ymm7
1469 ; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm9[6,7]
1470 ; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
1471 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1472 ; AVX2-NEXT: vpshufb %xmm8, %xmm5, %xmm9
1473 ; AVX2-NEXT: vpshufb %xmm8, %xmm4, %xmm8
1474 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
1475 ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1476 ; AVX2-NEXT: vpshufb %xmm9, %xmm3, %xmm10
1477 ; AVX2-NEXT: vpshufb %xmm9, %xmm2, %xmm11
1478 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
1479 ; AVX2-NEXT: vpblendd {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
1480 ; AVX2-NEXT: vpshufb %ymm9, %ymm1, %ymm10
1481 ; AVX2-NEXT: vpermd %ymm10, %ymm6, %ymm10
1482 ; AVX2-NEXT: vpshufb %ymm9, %ymm0, %ymm9
1483 ; AVX2-NEXT: vpermd %ymm9, %ymm6, %ymm9
1484 ; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm10[6,7]
1485 ; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
1486 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1487 ; AVX2-NEXT: vpshufb %xmm9, %xmm5, %xmm10
1488 ; AVX2-NEXT: vpshufb %xmm9, %xmm4, %xmm9
1489 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
1490 ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm10 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1491 ; AVX2-NEXT: vpshufb %xmm10, %xmm3, %xmm11
1492 ; AVX2-NEXT: vpshufb %xmm10, %xmm2, %xmm12
1493 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
1494 ; AVX2-NEXT: vpblendd {{.*#+}} xmm9 = xmm11[0,1],xmm9[2,3]
1495 ; AVX2-NEXT: vpshufb %ymm10, %ymm1, %ymm11
1496 ; AVX2-NEXT: vpermd %ymm11, %ymm6, %ymm11
1497 ; AVX2-NEXT: vpshufb %ymm10, %ymm0, %ymm10
1498 ; AVX2-NEXT: vpermd %ymm10, %ymm6, %ymm10
1499 ; AVX2-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
1500 ; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
1501 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm10 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1502 ; AVX2-NEXT: vpshufb %xmm10, %xmm5, %xmm5
1503 ; AVX2-NEXT: vpshufb %xmm10, %xmm4, %xmm4
1504 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
1505 ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1506 ; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm3
1507 ; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
1508 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
1509 ; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3]
1510 ; AVX2-NEXT: vpshufb %ymm5, %ymm1, %ymm1
1511 ; AVX2-NEXT: vpermd %ymm1, %ymm6, %ymm1
1512 ; AVX2-NEXT: vpshufb %ymm5, %ymm0, %ymm0
1513 ; AVX2-NEXT: vpermd %ymm0, %ymm6, %ymm0
1514 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1515 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
1516 ; AVX2-NEXT: vmovdqa %ymm7, (%rsi)
1517 ; AVX2-NEXT: vmovdqa %ymm8, (%rdx)
1518 ; AVX2-NEXT: vmovdqa %ymm9, (%rcx)
1519 ; AVX2-NEXT: vmovdqa %ymm0, (%r8)
1520 ; AVX2-NEXT: vzeroupper
1521 ; AVX2-NEXT: retq
1523 ; AVX2-FP-LABEL: load_i8_stride4_vf32:
1524 ; AVX2-FP: # %bb.0:
1525 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm0
1526 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm1
1527 ; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm2
1528 ; AVX2-FP-NEXT: vmovdqa 16(%rdi), %xmm3
1529 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm4
1530 ; AVX2-FP-NEXT: vmovdqa 48(%rdi), %xmm5
1531 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm6 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1532 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm5, %xmm7
1533 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm4, %xmm6
1534 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
1535 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1536 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm3, %xmm8
1537 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm2, %xmm9
1538 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
1539 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm6[2,3]
1540 ; AVX2-FP-NEXT: vpshufb %ymm7, %ymm1, %ymm9
1541 ; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4]
1542 ; AVX2-FP-NEXT: vpermd %ymm9, %ymm6, %ymm9
1543 ; AVX2-FP-NEXT: vpshufb %ymm7, %ymm0, %ymm7
1544 ; AVX2-FP-NEXT: vpermd %ymm7, %ymm6, %ymm7
1545 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm9[6,7]
1546 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
1547 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1548 ; AVX2-FP-NEXT: vpshufb %xmm8, %xmm5, %xmm9
1549 ; AVX2-FP-NEXT: vpshufb %xmm8, %xmm4, %xmm8
1550 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
1551 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} ymm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1552 ; AVX2-FP-NEXT: vpshufb %xmm9, %xmm3, %xmm10
1553 ; AVX2-FP-NEXT: vpshufb %xmm9, %xmm2, %xmm11
1554 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
1555 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
1556 ; AVX2-FP-NEXT: vpshufb %ymm9, %ymm1, %ymm10
1557 ; AVX2-FP-NEXT: vpermd %ymm10, %ymm6, %ymm10
1558 ; AVX2-FP-NEXT: vpshufb %ymm9, %ymm0, %ymm9
1559 ; AVX2-FP-NEXT: vpermd %ymm9, %ymm6, %ymm9
1560 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm10[6,7]
1561 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
1562 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1563 ; AVX2-FP-NEXT: vpshufb %xmm9, %xmm5, %xmm10
1564 ; AVX2-FP-NEXT: vpshufb %xmm9, %xmm4, %xmm9
1565 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
1566 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} ymm10 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1567 ; AVX2-FP-NEXT: vpshufb %xmm10, %xmm3, %xmm11
1568 ; AVX2-FP-NEXT: vpshufb %xmm10, %xmm2, %xmm12
1569 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
1570 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm9 = xmm11[0,1],xmm9[2,3]
1571 ; AVX2-FP-NEXT: vpshufb %ymm10, %ymm1, %ymm11
1572 ; AVX2-FP-NEXT: vpermd %ymm11, %ymm6, %ymm11
1573 ; AVX2-FP-NEXT: vpshufb %ymm10, %ymm0, %ymm10
1574 ; AVX2-FP-NEXT: vpermd %ymm10, %ymm6, %ymm10
1575 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
1576 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
1577 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm10 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1578 ; AVX2-FP-NEXT: vpshufb %xmm10, %xmm5, %xmm5
1579 ; AVX2-FP-NEXT: vpshufb %xmm10, %xmm4, %xmm4
1580 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
1581 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} ymm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1582 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
1583 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
1584 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
1585 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3]
1586 ; AVX2-FP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
1587 ; AVX2-FP-NEXT: vpermd %ymm1, %ymm6, %ymm1
1588 ; AVX2-FP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
1589 ; AVX2-FP-NEXT: vpermd %ymm0, %ymm6, %ymm0
1590 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1591 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
1592 ; AVX2-FP-NEXT: vmovdqa %ymm7, (%rsi)
1593 ; AVX2-FP-NEXT: vmovdqa %ymm8, (%rdx)
1594 ; AVX2-FP-NEXT: vmovdqa %ymm9, (%rcx)
1595 ; AVX2-FP-NEXT: vmovdqa %ymm0, (%r8)
1596 ; AVX2-FP-NEXT: vzeroupper
1597 ; AVX2-FP-NEXT: retq
1599 ; AVX2-FCP-LABEL: load_i8_stride4_vf32:
1600 ; AVX2-FCP: # %bb.0:
1601 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
1602 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm1
1603 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm2
1604 ; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm3
1605 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm4
1606 ; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm5
1607 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm6 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1608 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm7
1609 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm4, %xmm6
1610 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
1611 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1612 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm3, %xmm8
1613 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm2, %xmm9
1614 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
1615 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm6[2,3]
1616 ; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm9
1617 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4]
1618 ; AVX2-FCP-NEXT: vpermd %ymm9, %ymm6, %ymm9
1619 ; AVX2-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm7
1620 ; AVX2-FCP-NEXT: vpermd %ymm7, %ymm6, %ymm7
1621 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm9[6,7]
1622 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
1623 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1624 ; AVX2-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm9
1625 ; AVX2-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm8
1626 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
1627 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1628 ; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm10
1629 ; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm2, %xmm11
1630 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
1631 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
1632 ; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm1, %ymm10
1633 ; AVX2-FCP-NEXT: vpermd %ymm10, %ymm6, %ymm10
1634 ; AVX2-FCP-NEXT: vpshufb %ymm9, %ymm0, %ymm9
1635 ; AVX2-FCP-NEXT: vpermd %ymm9, %ymm6, %ymm9
1636 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm10[6,7]
1637 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
1638 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1639 ; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm5, %xmm10
1640 ; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm4, %xmm9
1641 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
1642 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm10 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1643 ; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm3, %xmm11
1644 ; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm12
1645 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
1646 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm9 = xmm11[0,1],xmm9[2,3]
1647 ; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm11
1648 ; AVX2-FCP-NEXT: vpermd %ymm11, %ymm6, %ymm11
1649 ; AVX2-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm10
1650 ; AVX2-FCP-NEXT: vpermd %ymm10, %ymm6, %ymm10
1651 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
1652 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
1653 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm10 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1654 ; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm5, %xmm5
1655 ; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm4
1656 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
1657 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1658 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
1659 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
1660 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
1661 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3]
1662 ; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm1
1663 ; AVX2-FCP-NEXT: vpermd %ymm1, %ymm6, %ymm1
1664 ; AVX2-FCP-NEXT: vpshufb %ymm5, %ymm0, %ymm0
1665 ; AVX2-FCP-NEXT: vpermd %ymm0, %ymm6, %ymm0
1666 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1667 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
1668 ; AVX2-FCP-NEXT: vmovdqa %ymm7, (%rsi)
1669 ; AVX2-FCP-NEXT: vmovdqa %ymm8, (%rdx)
1670 ; AVX2-FCP-NEXT: vmovdqa %ymm9, (%rcx)
1671 ; AVX2-FCP-NEXT: vmovdqa %ymm0, (%r8)
1672 ; AVX2-FCP-NEXT: vzeroupper
1673 ; AVX2-FCP-NEXT: retq
1675 ; AVX512-LABEL: load_i8_stride4_vf32:
1676 ; AVX512: # %bb.0:
1677 ; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm0 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1678 ; AVX512-NEXT: vmovdqa 96(%rdi), %ymm1
1679 ; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm2
1680 ; AVX512-NEXT: vmovdqa 64(%rdi), %ymm3
1681 ; AVX512-NEXT: vpshufb %ymm0, %ymm3, %ymm0
1682 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,4,0,4,0,4,8,12]
1683 ; AVX512-NEXT: vpermt2d %ymm2, %ymm4, %ymm0
1684 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm2
1685 ; AVX512-NEXT: vpmovdb %zmm2, %xmm5
1686 ; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
1687 ; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1688 ; AVX512-NEXT: vpshufb %ymm5, %ymm1, %ymm6
1689 ; AVX512-NEXT: vpshufb %ymm5, %ymm3, %ymm5
1690 ; AVX512-NEXT: vpermt2d %ymm6, %ymm4, %ymm5
1691 ; AVX512-NEXT: vpsrld $8, %zmm2, %zmm6
1692 ; AVX512-NEXT: vpmovdb %zmm6, %xmm6
1693 ; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
1694 ; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1695 ; AVX512-NEXT: vpshufb %ymm6, %ymm1, %ymm7
1696 ; AVX512-NEXT: vpshufb %ymm6, %ymm3, %ymm6
1697 ; AVX512-NEXT: vpermt2d %ymm7, %ymm4, %ymm6
1698 ; AVX512-NEXT: vpsrld $16, %zmm2, %zmm7
1699 ; AVX512-NEXT: vpmovdb %zmm7, %xmm7
1700 ; AVX512-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
1701 ; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1702 ; AVX512-NEXT: vpshufb %ymm7, %ymm1, %ymm1
1703 ; AVX512-NEXT: vpshufb %ymm7, %ymm3, %ymm3
1704 ; AVX512-NEXT: vpermt2d %ymm1, %ymm4, %ymm3
1705 ; AVX512-NEXT: vpsrld $24, %zmm2, %zmm1
1706 ; AVX512-NEXT: vpmovdb %zmm1, %xmm1
1707 ; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
1708 ; AVX512-NEXT: vmovdqa %ymm0, (%rsi)
1709 ; AVX512-NEXT: vmovdqa %ymm5, (%rdx)
1710 ; AVX512-NEXT: vmovdqa %ymm6, (%rcx)
1711 ; AVX512-NEXT: vmovdqa %ymm1, (%r8)
1712 ; AVX512-NEXT: vzeroupper
1713 ; AVX512-NEXT: retq
1715 ; AVX512-FCP-LABEL: load_i8_stride4_vf32:
1716 ; AVX512-FCP: # %bb.0:
1717 ; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm0 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1718 ; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %ymm1
1719 ; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm1, %ymm2
1720 ; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm3
1721 ; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
1722 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,4,0,4,0,4,8,12]
1723 ; AVX512-FCP-NEXT: vpermt2d %ymm2, %ymm4, %ymm0
1724 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
1725 ; AVX512-FCP-NEXT: vpmovdb %zmm2, %xmm5
1726 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
1727 ; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1728 ; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm6
1729 ; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm5
1730 ; AVX512-FCP-NEXT: vpermt2d %ymm6, %ymm4, %ymm5
1731 ; AVX512-FCP-NEXT: vpsrld $8, %zmm2, %zmm6
1732 ; AVX512-FCP-NEXT: vpmovdb %zmm6, %xmm6
1733 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
1734 ; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1735 ; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm7
1736 ; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm6
1737 ; AVX512-FCP-NEXT: vpermt2d %ymm7, %ymm4, %ymm6
1738 ; AVX512-FCP-NEXT: vpsrld $16, %zmm2, %zmm7
1739 ; AVX512-FCP-NEXT: vpmovdb %zmm7, %xmm7
1740 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
1741 ; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1742 ; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
1743 ; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm3
1744 ; AVX512-FCP-NEXT: vpermt2d %ymm1, %ymm4, %ymm3
1745 ; AVX512-FCP-NEXT: vpsrld $24, %zmm2, %zmm1
1746 ; AVX512-FCP-NEXT: vpmovdb %zmm1, %xmm1
1747 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
1748 ; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rsi)
1749 ; AVX512-FCP-NEXT: vmovdqa %ymm5, (%rdx)
1750 ; AVX512-FCP-NEXT: vmovdqa %ymm6, (%rcx)
1751 ; AVX512-FCP-NEXT: vmovdqa %ymm1, (%r8)
1752 ; AVX512-FCP-NEXT: vzeroupper
1753 ; AVX512-FCP-NEXT: retq
1755 ; AVX512DQ-LABEL: load_i8_stride4_vf32:
1756 ; AVX512DQ: # %bb.0:
1757 ; AVX512DQ-NEXT: vpbroadcastd {{.*#+}} ymm0 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1758 ; AVX512DQ-NEXT: vmovdqa 96(%rdi), %ymm1
1759 ; AVX512DQ-NEXT: vpshufb %ymm0, %ymm1, %ymm2
1760 ; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm3
1761 ; AVX512DQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
1762 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,4,0,4,0,4,8,12]
1763 ; AVX512DQ-NEXT: vpermt2d %ymm2, %ymm4, %ymm0
1764 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm2
1765 ; AVX512DQ-NEXT: vpmovdb %zmm2, %xmm5
1766 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
1767 ; AVX512DQ-NEXT: vpbroadcastd {{.*#+}} ymm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1768 ; AVX512DQ-NEXT: vpshufb %ymm5, %ymm1, %ymm6
1769 ; AVX512DQ-NEXT: vpshufb %ymm5, %ymm3, %ymm5
1770 ; AVX512DQ-NEXT: vpermt2d %ymm6, %ymm4, %ymm5
1771 ; AVX512DQ-NEXT: vpsrld $8, %zmm2, %zmm6
1772 ; AVX512DQ-NEXT: vpmovdb %zmm6, %xmm6
1773 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
1774 ; AVX512DQ-NEXT: vpbroadcastd {{.*#+}} ymm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1775 ; AVX512DQ-NEXT: vpshufb %ymm6, %ymm1, %ymm7
1776 ; AVX512DQ-NEXT: vpshufb %ymm6, %ymm3, %ymm6
1777 ; AVX512DQ-NEXT: vpermt2d %ymm7, %ymm4, %ymm6
1778 ; AVX512DQ-NEXT: vpsrld $16, %zmm2, %zmm7
1779 ; AVX512DQ-NEXT: vpmovdb %zmm7, %xmm7
1780 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
1781 ; AVX512DQ-NEXT: vpbroadcastd {{.*#+}} ymm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1782 ; AVX512DQ-NEXT: vpshufb %ymm7, %ymm1, %ymm1
1783 ; AVX512DQ-NEXT: vpshufb %ymm7, %ymm3, %ymm3
1784 ; AVX512DQ-NEXT: vpermt2d %ymm1, %ymm4, %ymm3
1785 ; AVX512DQ-NEXT: vpsrld $24, %zmm2, %zmm1
1786 ; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
1787 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
1788 ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rsi)
1789 ; AVX512DQ-NEXT: vmovdqa %ymm5, (%rdx)
1790 ; AVX512DQ-NEXT: vmovdqa %ymm6, (%rcx)
1791 ; AVX512DQ-NEXT: vmovdqa %ymm1, (%r8)
1792 ; AVX512DQ-NEXT: vzeroupper
1793 ; AVX512DQ-NEXT: retq
1795 ; AVX512DQ-FCP-LABEL: load_i8_stride4_vf32:
1796 ; AVX512DQ-FCP: # %bb.0:
1797 ; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm0 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
1798 ; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %ymm1
1799 ; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm1, %ymm2
1800 ; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm3
1801 ; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm3, %ymm0
1802 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,4,0,4,0,4,8,12]
1803 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm2, %ymm4, %ymm0
1804 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
1805 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm2, %xmm5
1806 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
1807 ; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
1808 ; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm1, %ymm6
1809 ; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm3, %ymm5
1810 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm6, %ymm4, %ymm5
1811 ; AVX512DQ-FCP-NEXT: vpsrld $8, %zmm2, %zmm6
1812 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm6, %xmm6
1813 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
1814 ; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
1815 ; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm7
1816 ; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm6
1817 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm7, %ymm4, %ymm6
1818 ; AVX512DQ-FCP-NEXT: vpsrld $16, %zmm2, %zmm7
1819 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm7, %xmm7
1820 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
1821 ; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
1822 ; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm1
1823 ; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm3
1824 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm1, %ymm4, %ymm3
1825 ; AVX512DQ-FCP-NEXT: vpsrld $24, %zmm2, %zmm1
1826 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm1, %xmm1
1827 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
1828 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rsi)
1829 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, (%rdx)
1830 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, (%rcx)
1831 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, (%r8)
1832 ; AVX512DQ-FCP-NEXT: vzeroupper
1833 ; AVX512DQ-FCP-NEXT: retq
1835 ; AVX512BW-LABEL: load_i8_stride4_vf32:
1836 ; AVX512BW: # %bb.0:
1837 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,4,8,12,1,5,9,13]
1838 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
1839 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm2
1840 ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zmm2[0,4,8,12,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[16,20,24,28,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[32,36,40,44,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[48,52,56,60,u,u,u,u,u,u,u,u]
1841 ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm1[0,4,8,12],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,16,20,24,28],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,32,36,40,44],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,48,52,56,60],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1842 ; AVX512BW-NEXT: vporq %zmm3, %zmm4, %zmm3
1843 ; AVX512BW-NEXT: vpermd %zmm3, %zmm0, %zmm3
1844 ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm4 = zero,zero,zero,zero,zmm2[1,5,9,13,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[17,21,25,29,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[33,37,41,45,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[49,53,57,61,u,u,u,u,u,u,u,u]
1845 ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm1[1,5,9,13],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,17,21,25,29],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,33,37,41,45],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,49,53,57,61],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1846 ; AVX512BW-NEXT: vporq %zmm4, %zmm5, %zmm4
1847 ; AVX512BW-NEXT: vpermd %zmm4, %zmm0, %zmm4
1848 ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm5 = zero,zero,zero,zero,zmm2[2,6,10,14,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[18,22,26,30,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[34,38,42,46,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[50,54,58,62,u,u,u,u,u,u,u,u]
1849 ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm6 = zmm1[2,6,10,14],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,18,22,26,30],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,34,38,42,46],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,50,54,58,62],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1850 ; AVX512BW-NEXT: vporq %zmm5, %zmm6, %zmm5
1851 ; AVX512BW-NEXT: vpermd %zmm5, %zmm0, %zmm5
1852 ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm2 = zero,zero,zero,zero,zmm2[3,7,11,15,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[19,23,27,31,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[35,39,43,47,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[51,55,59,63,u,u,u,u,u,u,u,u]
1853 ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[3,7,11,15],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,19,23,27,31],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,35,39,43,47],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,51,55,59,63],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1854 ; AVX512BW-NEXT: vporq %zmm2, %zmm1, %zmm1
1855 ; AVX512BW-NEXT: vpermd %zmm1, %zmm0, %zmm0
1856 ; AVX512BW-NEXT: vmovdqa %ymm3, (%rsi)
1857 ; AVX512BW-NEXT: vmovdqa %ymm4, (%rdx)
1858 ; AVX512BW-NEXT: vmovdqa %ymm5, (%rcx)
1859 ; AVX512BW-NEXT: vmovdqa %ymm0, (%r8)
1860 ; AVX512BW-NEXT: vzeroupper
1861 ; AVX512BW-NEXT: retq
1863 ; AVX512BW-FCP-LABEL: load_i8_stride4_vf32:
1864 ; AVX512BW-FCP: # %bb.0:
1865 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,4,8,12,1,5,9,13]
1866 ; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
1867 ; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm2
1868 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zmm2[0,4,8,12,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[16,20,24,28,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[32,36,40,44,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[48,52,56,60,u,u,u,u,u,u,u,u]
1869 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm1[0,4,8,12],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,16,20,24,28],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,32,36,40,44],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,48,52,56,60],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1870 ; AVX512BW-FCP-NEXT: vporq %zmm3, %zmm4, %zmm3
1871 ; AVX512BW-FCP-NEXT: vpermd %zmm3, %zmm0, %zmm3
1872 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zero,zero,zero,zero,zmm2[1,5,9,13,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[17,21,25,29,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[33,37,41,45,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[49,53,57,61,u,u,u,u,u,u,u,u]
1873 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm1[1,5,9,13],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,17,21,25,29],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,33,37,41,45],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,49,53,57,61],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1874 ; AVX512BW-FCP-NEXT: vporq %zmm4, %zmm5, %zmm4
1875 ; AVX512BW-FCP-NEXT: vpermd %zmm4, %zmm0, %zmm4
1876 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zero,zero,zero,zero,zmm2[2,6,10,14,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[18,22,26,30,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[34,38,42,46,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[50,54,58,62,u,u,u,u,u,u,u,u]
1877 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm6 = zmm1[2,6,10,14],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,18,22,26,30],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,34,38,42,46],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,50,54,58,62],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1878 ; AVX512BW-FCP-NEXT: vporq %zmm5, %zmm6, %zmm5
1879 ; AVX512BW-FCP-NEXT: vpermd %zmm5, %zmm0, %zmm5
1880 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zero,zero,zero,zero,zmm2[3,7,11,15,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[19,23,27,31,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[35,39,43,47,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[51,55,59,63,u,u,u,u,u,u,u,u]
1881 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[3,7,11,15],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,19,23,27,31],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,35,39,43,47],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,51,55,59,63],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1882 ; AVX512BW-FCP-NEXT: vporq %zmm2, %zmm1, %zmm1
1883 ; AVX512BW-FCP-NEXT: vpermd %zmm1, %zmm0, %zmm0
1884 ; AVX512BW-FCP-NEXT: vmovdqa %ymm3, (%rsi)
1885 ; AVX512BW-FCP-NEXT: vmovdqa %ymm4, (%rdx)
1886 ; AVX512BW-FCP-NEXT: vmovdqa %ymm5, (%rcx)
1887 ; AVX512BW-FCP-NEXT: vmovdqa %ymm0, (%r8)
1888 ; AVX512BW-FCP-NEXT: vzeroupper
1889 ; AVX512BW-FCP-NEXT: retq
1891 ; AVX512DQ-BW-LABEL: load_i8_stride4_vf32:
1892 ; AVX512DQ-BW: # %bb.0:
1893 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,4,8,12,1,5,9,13]
1894 ; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm1
1895 ; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm2
1896 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zmm2[0,4,8,12,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[16,20,24,28,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[32,36,40,44,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[48,52,56,60,u,u,u,u,u,u,u,u]
1897 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm1[0,4,8,12],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,16,20,24,28],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,32,36,40,44],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,48,52,56,60],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1898 ; AVX512DQ-BW-NEXT: vporq %zmm3, %zmm4, %zmm3
1899 ; AVX512DQ-BW-NEXT: vpermd %zmm3, %zmm0, %zmm3
1900 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm4 = zero,zero,zero,zero,zmm2[1,5,9,13,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[17,21,25,29,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[33,37,41,45,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[49,53,57,61,u,u,u,u,u,u,u,u]
1901 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm1[1,5,9,13],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,17,21,25,29],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,33,37,41,45],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,49,53,57,61],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1902 ; AVX512DQ-BW-NEXT: vporq %zmm4, %zmm5, %zmm4
1903 ; AVX512DQ-BW-NEXT: vpermd %zmm4, %zmm0, %zmm4
1904 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm5 = zero,zero,zero,zero,zmm2[2,6,10,14,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[18,22,26,30,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[34,38,42,46,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[50,54,58,62,u,u,u,u,u,u,u,u]
1905 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm6 = zmm1[2,6,10,14],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,18,22,26,30],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,34,38,42,46],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,50,54,58,62],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1906 ; AVX512DQ-BW-NEXT: vporq %zmm5, %zmm6, %zmm5
1907 ; AVX512DQ-BW-NEXT: vpermd %zmm5, %zmm0, %zmm5
1908 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm2 = zero,zero,zero,zero,zmm2[3,7,11,15,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[19,23,27,31,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[35,39,43,47,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[51,55,59,63,u,u,u,u,u,u,u,u]
1909 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[3,7,11,15],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,19,23,27,31],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,35,39,43,47],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,51,55,59,63],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1910 ; AVX512DQ-BW-NEXT: vporq %zmm2, %zmm1, %zmm1
1911 ; AVX512DQ-BW-NEXT: vpermd %zmm1, %zmm0, %zmm0
1912 ; AVX512DQ-BW-NEXT: vmovdqa %ymm3, (%rsi)
1913 ; AVX512DQ-BW-NEXT: vmovdqa %ymm4, (%rdx)
1914 ; AVX512DQ-BW-NEXT: vmovdqa %ymm5, (%rcx)
1915 ; AVX512DQ-BW-NEXT: vmovdqa %ymm0, (%r8)
1916 ; AVX512DQ-BW-NEXT: vzeroupper
1917 ; AVX512DQ-BW-NEXT: retq
1919 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride4_vf32:
1920 ; AVX512DQ-BW-FCP: # %bb.0:
1921 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,4,8,12,1,5,9,13]
1922 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm1
1923 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm2
1924 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zmm2[0,4,8,12,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[16,20,24,28,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[32,36,40,44,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[48,52,56,60,u,u,u,u,u,u,u,u]
1925 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm1[0,4,8,12],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,16,20,24,28],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,32,36,40,44],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,48,52,56,60],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1926 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm3, %zmm4, %zmm3
1927 ; AVX512DQ-BW-FCP-NEXT: vpermd %zmm3, %zmm0, %zmm3
1928 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zero,zero,zero,zero,zmm2[1,5,9,13,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[17,21,25,29,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[33,37,41,45,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[49,53,57,61,u,u,u,u,u,u,u,u]
1929 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm1[1,5,9,13],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,17,21,25,29],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,33,37,41,45],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,49,53,57,61],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1930 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm4, %zmm5, %zmm4
1931 ; AVX512DQ-BW-FCP-NEXT: vpermd %zmm4, %zmm0, %zmm4
1932 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zero,zero,zero,zero,zmm2[2,6,10,14,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[18,22,26,30,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[34,38,42,46,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[50,54,58,62,u,u,u,u,u,u,u,u]
1933 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm6 = zmm1[2,6,10,14],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,18,22,26,30],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,34,38,42,46],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,50,54,58,62],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1934 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm5, %zmm6, %zmm5
1935 ; AVX512DQ-BW-FCP-NEXT: vpermd %zmm5, %zmm0, %zmm5
1936 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zero,zero,zero,zero,zmm2[3,7,11,15,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[19,23,27,31,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[35,39,43,47,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[51,55,59,63,u,u,u,u,u,u,u,u]
1937 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[3,7,11,15],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,19,23,27,31],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,35,39,43,47],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,51,55,59,63],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u]
1938 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm2, %zmm1, %zmm1
1939 ; AVX512DQ-BW-FCP-NEXT: vpermd %zmm1, %zmm0, %zmm0
1940 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm3, (%rsi)
1941 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm4, (%rdx)
1942 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, (%rcx)
1943 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, (%r8)
1944 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
1945 ; AVX512DQ-BW-FCP-NEXT: retq
1946 %wide.vec = load <128 x i8>, ptr %in.vec, align 64
1947 %strided.vec0 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124>
1948 %strided.vec1 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61, i32 65, i32 69, i32 73, i32 77, i32 81, i32 85, i32 89, i32 93, i32 97, i32 101, i32 105, i32 109, i32 113, i32 117, i32 121, i32 125>
1949 %strided.vec2 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62, i32 66, i32 70, i32 74, i32 78, i32 82, i32 86, i32 90, i32 94, i32 98, i32 102, i32 106, i32 110, i32 114, i32 118, i32 122, i32 126>
1950 %strided.vec3 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63, i32 67, i32 71, i32 75, i32 79, i32 83, i32 87, i32 91, i32 95, i32 99, i32 103, i32 107, i32 111, i32 115, i32 119, i32 123, i32 127>
1951 store <32 x i8> %strided.vec0, ptr %out.vec0, align 64
1952 store <32 x i8> %strided.vec1, ptr %out.vec1, align 64
1953 store <32 x i8> %strided.vec2, ptr %out.vec2, align 64
1954 store <32 x i8> %strided.vec3, ptr %out.vec3, align 64
1955 ret void
1956 }
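; Informal reading of the shufflevector masks above: element k of %strided.vecN picks byte 4*k+N of %wide.vec,
; so the four masks split one 128-byte stride-4 load into its four 32-byte component streams.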
1958 define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
1959 ; SSE-LABEL: load_i8_stride4_vf64:
1960 ; SSE: # %bb.0:
1961 ; SSE-NEXT: subq $600, %rsp # imm = 0x258
1962 ; SSE-NEXT: movdqa 16(%rdi), %xmm8
1963 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1964 ; SSE-NEXT: movdqa 32(%rdi), %xmm15
1965 ; SSE-NEXT: movdqa 48(%rdi), %xmm14
1966 ; SSE-NEXT: movdqa 128(%rdi), %xmm4
1967 ; SSE-NEXT: movdqa 144(%rdi), %xmm7
1968 ; SSE-NEXT: movdqa 160(%rdi), %xmm10
1969 ; SSE-NEXT: movdqa 176(%rdi), %xmm12
1970 ; SSE-NEXT: movdqa 64(%rdi), %xmm6
1971 ; SSE-NEXT: movdqa 80(%rdi), %xmm13
1972 ; SSE-NEXT: movdqa 96(%rdi), %xmm2
1973 ; SSE-NEXT: movdqa 112(%rdi), %xmm1
1974 ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,0,255,0,255,0,255,0]
1975 ; SSE-NEXT: movdqa %xmm1, %xmm0
1976 ; SSE-NEXT: movdqa %xmm1, %xmm3
1977 ; SSE-NEXT: pand %xmm9, %xmm0
1978 ; SSE-NEXT: movdqa %xmm2, %xmm1
1979 ; SSE-NEXT: movdqa %xmm2, %xmm5
1980 ; SSE-NEXT: pand %xmm9, %xmm1
1981 ; SSE-NEXT: packuswb %xmm0, %xmm1
1982 ; SSE-NEXT: movdqa %xmm13, %xmm0
1983 ; SSE-NEXT: pand %xmm9, %xmm0
1984 ; SSE-NEXT: movdqa %xmm6, %xmm2
1985 ; SSE-NEXT: pand %xmm9, %xmm2
1986 ; SSE-NEXT: packuswb %xmm0, %xmm2
1987 ; SSE-NEXT: packuswb %xmm1, %xmm2
1988 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1989 ; SSE-NEXT: movdqa %xmm12, %xmm0
1990 ; SSE-NEXT: pand %xmm9, %xmm0
1991 ; SSE-NEXT: movdqa %xmm10, %xmm1
1992 ; SSE-NEXT: pand %xmm9, %xmm1
1993 ; SSE-NEXT: packuswb %xmm0, %xmm1
1994 ; SSE-NEXT: movdqa %xmm7, %xmm0
1995 ; SSE-NEXT: pand %xmm9, %xmm0
1996 ; SSE-NEXT: movdqa %xmm4, %xmm2
1997 ; SSE-NEXT: pand %xmm9, %xmm2
1998 ; SSE-NEXT: packuswb %xmm0, %xmm2
1999 ; SSE-NEXT: packuswb %xmm1, %xmm2
2000 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2001 ; SSE-NEXT: movdqa %xmm14, %xmm0
2002 ; SSE-NEXT: pand %xmm9, %xmm0
2003 ; SSE-NEXT: movdqa %xmm15, %xmm1
2004 ; SSE-NEXT: pand %xmm9, %xmm1
2005 ; SSE-NEXT: packuswb %xmm0, %xmm1
2006 ; SSE-NEXT: movdqa %xmm8, %xmm0
2007 ; SSE-NEXT: pand %xmm9, %xmm0
2008 ; SSE-NEXT: movdqa (%rdi), %xmm11
2009 ; SSE-NEXT: movdqa %xmm11, %xmm2
2010 ; SSE-NEXT: pand %xmm9, %xmm2
2011 ; SSE-NEXT: packuswb %xmm0, %xmm2
2012 ; SSE-NEXT: packuswb %xmm1, %xmm2
2013 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2014 ; SSE-NEXT: movdqa 240(%rdi), %xmm0
2015 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2016 ; SSE-NEXT: pand %xmm9, %xmm0
2017 ; SSE-NEXT: movdqa 224(%rdi), %xmm1
2018 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2019 ; SSE-NEXT: pand %xmm9, %xmm1
2020 ; SSE-NEXT: packuswb %xmm0, %xmm1
2021 ; SSE-NEXT: movdqa 208(%rdi), %xmm0
2022 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2023 ; SSE-NEXT: pand %xmm9, %xmm0
2024 ; SSE-NEXT: movdqa 192(%rdi), %xmm2
2025 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2026 ; SSE-NEXT: pand %xmm2, %xmm9
2027 ; SSE-NEXT: packuswb %xmm0, %xmm9
2028 ; SSE-NEXT: packuswb %xmm1, %xmm9
2029 ; SSE-NEXT: movdqa %xmm3, %xmm1
2030 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2031 ; SSE-NEXT: movdqa %xmm3, %xmm0
2032 ; SSE-NEXT: pxor %xmm8, %xmm8
2033 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2034 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2035 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2036 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
2037 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
2038 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2039 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,2,2,3]
2040 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
2041 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2042 ; SSE-NEXT: movdqa %xmm5, %xmm1
2043 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2044 ; SSE-NEXT: movdqa %xmm5, %xmm0
2045 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2046 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2047 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2048 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
2049 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
2050 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2051 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3]
2052 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
2053 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
2054 ; SSE-NEXT: packuswb %xmm3, %xmm5
2055 ; SSE-NEXT: movdqa %xmm13, %xmm1
2056 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2057 ; SSE-NEXT: movdqa %xmm13, %xmm0
2058 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2059 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2060 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2061 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
2062 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
2063 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2064 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,2,2,3]
2065 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
2066 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2067 ; SSE-NEXT: movdqa %xmm6, %xmm1
2068 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2069 ; SSE-NEXT: movdqa %xmm6, %xmm0
2070 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2071 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2072 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2073 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
2074 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
2075 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2076 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,3]
2077 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[1,3,2,3,4,5,6,7]
2078 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2079 ; SSE-NEXT: packuswb %xmm3, %xmm1
2080 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm5[0,3]
2081 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2082 ; SSE-NEXT: movdqa %xmm12, %xmm1
2083 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2084 ; SSE-NEXT: movdqa %xmm12, %xmm0
2085 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2086 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2087 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2088 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
2089 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
2090 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2091 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,2,2,3]
2092 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
2093 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2094 ; SSE-NEXT: movdqa %xmm10, %xmm1
2095 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2096 ; SSE-NEXT: movdqa %xmm10, %xmm0
2097 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2098 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2099 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2100 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
2101 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
2102 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2103 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3]
2104 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[1,3,2,3,4,5,6,7]
2105 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
2106 ; SSE-NEXT: packuswb %xmm3, %xmm6
2107 ; SSE-NEXT: movdqa %xmm7, (%rsp) # 16-byte Spill
2108 ; SSE-NEXT: movdqa %xmm7, %xmm0
2109 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2110 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2111 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2112 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
2113 ; SSE-NEXT: movdqa %xmm7, %xmm1
2114 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
2115 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2116 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,2,2,3]
2117 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
2118 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2119 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2120 ; SSE-NEXT: movdqa %xmm4, %xmm0
2121 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2122 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2123 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2124 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
2125 ; SSE-NEXT: movdqa %xmm4, %xmm1
2126 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
2127 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2128 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3]
2129 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm5[1,3,2,3,4,5,6,7]
2130 ; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
2131 ; SSE-NEXT: packuswb %xmm3, %xmm10
2132 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,3],xmm6[0,3]
2133 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2134 ; SSE-NEXT: movdqa %xmm14, %xmm0
2135 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2136 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2137 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2138 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
2139 ; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm8[0],xmm14[1],xmm8[1],xmm14[2],xmm8[2],xmm14[3],xmm8[3],xmm14[4],xmm8[4],xmm14[5],xmm8[5],xmm14[6],xmm8[6],xmm14[7],xmm8[7]
2140 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2141 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm14[0,2,2,3]
2142 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
2143 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2144 ; SSE-NEXT: movdqa %xmm15, %xmm0
2145 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2146 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2147 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2148 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm0[1,3,2,3,4,5,6,7]
2149 ; SSE-NEXT: movdqa %xmm15, %xmm12
2150 ; SSE-NEXT: movdqa %xmm15, %xmm14
2151 ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3],xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
2152 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm12[0,2,2,3]
2153 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[1,3,2,3,4,5,6,7]
2154 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
2155 ; SSE-NEXT: packuswb %xmm3, %xmm0
2156 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2157 ; SSE-NEXT: movdqa %xmm15, %xmm13
2158 ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm8[8],xmm13[9],xmm8[9],xmm13[10],xmm8[10],xmm13[11],xmm8[11],xmm13[12],xmm8[12],xmm13[13],xmm8[13],xmm13[14],xmm8[14],xmm13[15],xmm8[15]
2159 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[0,2,2,3]
2160 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
2161 ; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
2162 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm15[0,2,2,3]
2163 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[0,1,1,3,4,5,6,7]
2164 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
2165 ; SSE-NEXT: movdqa %xmm11, %xmm7
2166 ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
2167 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,2,2,3]
2168 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
2169 ; SSE-NEXT: movdqa %xmm11, %xmm6
2170 ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
2171 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,2,3]
2172 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
2173 ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2174 ; SSE-NEXT: packuswb %xmm1, %xmm5
2175 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm0[0,3]
2176 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2177 ; SSE-NEXT: movdqa %xmm1, %xmm0
2178 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2179 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2180 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2181 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
2182 ; SSE-NEXT: movdqa %xmm1, %xmm3
2183 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
2184 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2185 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
2186 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
2187 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2188 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2189 ; SSE-NEXT: movdqa %xmm2, %xmm0
2190 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2191 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2192 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2193 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
2194 ; SSE-NEXT: movdqa %xmm2, %xmm4
2195 ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
2196 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2197 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
2198 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
2199 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2200 ; SSE-NEXT: packuswb %xmm1, %xmm3
2201 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2202 ; SSE-NEXT: movdqa %xmm1, %xmm0
2203 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
2204 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2205 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2206 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,1,1,3,4,5,6,7]
2207 ; SSE-NEXT: movdqa %xmm1, %xmm0
2208 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
2209 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2210 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
2211 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,3,4,5,6,7]
2212 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
2213 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2214 ; SSE-NEXT: movdqa %xmm0, %xmm4
2215 ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
2216 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2217 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
2218 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2219 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
2220 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[1,3,2,3,4,5,6,7]
2221 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
2222 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7]
2223 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
2224 ; SSE-NEXT: packuswb %xmm2, %xmm1
2225 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
2226 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2227 ; SSE-NEXT: # xmm0 = mem[3,1,2,3,4,5,6,7]
2228 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm14[3,1,2,3,4,5,6,7]
2229 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,7,5,6,7]
2230 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
2231 ; SSE-NEXT: pand %xmm0, %xmm3
2232 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
2233 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
2234 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7]
2235 ; SSE-NEXT: pand %xmm0, %xmm2
2236 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
2237 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[1,0,3,2,4,5,6,7]
2238 ; SSE-NEXT: packuswb %xmm3, %xmm4
2239 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
2240 ; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
2241 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm11[3,1,2,3,4,5,6,7]
2242 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7]
2243 ; SSE-NEXT: pand %xmm0, %xmm2
2244 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
2245 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm2[0,1,2,3,7,6,5,4]
2246 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,7,5,6,7]
2247 ; SSE-NEXT: pand %xmm0, %xmm2
2248 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
2249 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
2250 ; SSE-NEXT: packuswb %xmm8, %xmm2
2251 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm4[0,3]
2252 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2253 ; SSE-NEXT: # xmm3 = mem[3,1,2,3,4,5,6,7]
2254 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2255 ; SSE-NEXT: # xmm4 = mem[3,1,2,3,4,5,6,7]
2256 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7]
2257 ; SSE-NEXT: pand %xmm0, %xmm3
2258 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
2259 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
2260 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7]
2261 ; SSE-NEXT: pand %xmm0, %xmm4
2262 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
2263 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7]
2264 ; SSE-NEXT: packuswb %xmm3, %xmm4
2265 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2266 ; SSE-NEXT: # xmm3 = mem[3,1,2,3,4,5,6,7]
2267 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
2268 ; SSE-NEXT: # xmm8 = mem[3,1,2,3,4,5,6,7]
2269 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7]
2270 ; SSE-NEXT: pand %xmm0, %xmm3
2271 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
2272 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
2273 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,5,6,7]
2274 ; SSE-NEXT: pand %xmm0, %xmm8
2275 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
2276 ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm8[1,0,3,2,4,5,6,7]
2277 ; SSE-NEXT: packuswb %xmm3, %xmm11
2278 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,3],xmm4[0,3]
2279 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2280 ; SSE-NEXT: # xmm3 = mem[3,1,2,3,4,5,6,7]
2281 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2282 ; SSE-NEXT: # xmm4 = mem[3,1,2,3,4,5,6,7]
2283 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7]
2284 ; SSE-NEXT: pand %xmm0, %xmm3
2285 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
2286 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
2287 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7]
2288 ; SSE-NEXT: pand %xmm0, %xmm4
2289 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
2290 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7]
2291 ; SSE-NEXT: packuswb %xmm3, %xmm4
2292 ; SSE-NEXT: pshuflw $231, (%rsp), %xmm3 # 16-byte Folded Reload
2293 ; SSE-NEXT: # xmm3 = mem[3,1,2,3,4,5,6,7]
2294 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
2295 ; SSE-NEXT: # xmm8 = mem[3,1,2,3,4,5,6,7]
2296 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7]
2297 ; SSE-NEXT: pand %xmm0, %xmm3
2298 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
2299 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
2300 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,5,6,7]
2301 ; SSE-NEXT: pand %xmm0, %xmm8
2302 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
2303 ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm8[1,0,3,2,4,5,6,7]
2304 ; SSE-NEXT: packuswb %xmm3, %xmm14
2305 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm4[0,3]
2306 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2307 ; SSE-NEXT: # xmm3 = mem[3,1,2,3,4,5,6,7]
2308 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2309 ; SSE-NEXT: # xmm4 = mem[3,1,2,3,4,5,6,7]
2310 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7]
2311 ; SSE-NEXT: pand %xmm0, %xmm3
2312 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
2313 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
2314 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7]
2315 ; SSE-NEXT: pand %xmm0, %xmm4
2316 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
2317 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7]
2318 ; SSE-NEXT: packuswb %xmm3, %xmm4
2319 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2320 ; SSE-NEXT: # xmm3 = mem[3,1,2,3,4,5,6,7]
2321 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
2322 ; SSE-NEXT: # xmm8 = mem[3,1,2,3,4,5,6,7]
2323 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7]
2324 ; SSE-NEXT: pand %xmm0, %xmm3
2325 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,5,6,7]
2326 ; SSE-NEXT: pand %xmm0, %xmm8
2327 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,2,0]
2328 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,7,6,5,4]
2329 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[0,2,2,3]
2330 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
2331 ; SSE-NEXT: packuswb %xmm3, %xmm0
2332 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm4[0,3]
2333 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2334 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2335 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
2336 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2337 ; SSE-NEXT: # xmm4 = mem[3,1,2,3]
2338 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
2339 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2340 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2341 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2342 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2343 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm12[3,1,2,3]
2344 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7]
2345 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
2346 ; SSE-NEXT: packuswb %xmm4, %xmm8
2347 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[3,1,2,3]
2348 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
2349 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm15[3,1,2,3]
2350 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
2351 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2352 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[3,1,2,3]
2353 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2354 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[3,1,2,3]
2355 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
2356 ; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
2357 ; SSE-NEXT: packuswb %xmm4, %xmm6
2358 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm8[0,3]
2359 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2360 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2361 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
2362 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2363 ; SSE-NEXT: # xmm4 = mem[3,1,2,3]
2364 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
2365 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2366 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2367 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2368 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2369 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
2370 ; SSE-NEXT: # xmm7 = mem[3,1,2,3]
2371 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm7[3,1,2,3,4,5,6,7]
2372 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
2373 ; SSE-NEXT: packuswb %xmm4, %xmm8
2374 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2375 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2376 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
2377 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2378 ; SSE-NEXT: # xmm4 = mem[3,1,2,3]
2379 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
2380 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2381 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2382 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2383 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2384 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
2385 ; SSE-NEXT: # xmm7 = mem[3,1,2,3]
2386 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,1,2,3,4,5,6,7]
2387 ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
2388 ; SSE-NEXT: packuswb %xmm4, %xmm7
2389 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm8[0,3]
2390 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2391 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2392 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
2393 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2394 ; SSE-NEXT: # xmm4 = mem[3,1,2,3]
2395 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
2396 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2397 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2398 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2399 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2400 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
2401 ; SSE-NEXT: # xmm8 = mem[3,1,2,3]
2402 ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm8[3,1,2,3,4,5,6,7]
2403 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
2404 ; SSE-NEXT: packuswb %xmm4, %xmm12
2405 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2406 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2407 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
2408 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2409 ; SSE-NEXT: # xmm4 = mem[3,1,2,3]
2410 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
2411 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2412 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2413 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2414 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2415 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
2416 ; SSE-NEXT: # xmm8 = mem[3,1,2,3]
2417 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7]
2418 ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
2419 ; SSE-NEXT: packuswb %xmm4, %xmm8
2420 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,3],xmm12[0,3]
2421 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2422 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2423 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
2424 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2425 ; SSE-NEXT: # xmm4 = mem[3,1,2,3]
2426 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
2427 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2428 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2429 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2430 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2431 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
2432 ; SSE-NEXT: # xmm12 = mem[3,1,2,3]
2433 ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[3,1,2,3,4,5,6,7]
2434 ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
2435 ; SSE-NEXT: packuswb %xmm4, %xmm12
2436 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2437 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2438 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
2439 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2440 ; SSE-NEXT: # xmm4 = mem[3,1,2,3]
2441 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
2442 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
2443 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2444 ; SSE-NEXT: # xmm3 = mem[3,1,2,3]
2445 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2446 ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
2447 ; SSE-NEXT: # xmm13 = mem[3,1,2,3]
2448 ; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[3,1,2,3,4,5,6,7]
2449 ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1]
2450 ; SSE-NEXT: packuswb %xmm4, %xmm13
2451 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,3],xmm12[0,3]
2452 ; SSE-NEXT: movdqa %xmm9, 48(%rsi)
2453 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2454 ; SSE-NEXT: movaps %xmm3, (%rsi)
2455 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2456 ; SSE-NEXT: movaps %xmm3, 32(%rsi)
2457 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2458 ; SSE-NEXT: movaps %xmm3, 16(%rsi)
2459 ; SSE-NEXT: movaps %xmm1, 48(%rdx)
2460 ; SSE-NEXT: movaps %xmm5, (%rdx)
2461 ; SSE-NEXT: movaps %xmm10, 32(%rdx)
2462 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2463 ; SSE-NEXT: movaps %xmm1, 16(%rdx)
2464 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
2465 ; SSE-NEXT: movaps %xmm14, 32(%rcx)
2466 ; SSE-NEXT: movaps %xmm11, 16(%rcx)
2467 ; SSE-NEXT: movaps %xmm2, (%rcx)
2468 ; SSE-NEXT: movaps %xmm13, 48(%r8)
2469 ; SSE-NEXT: movaps %xmm8, 32(%r8)
2470 ; SSE-NEXT: movaps %xmm7, 16(%r8)
2471 ; SSE-NEXT: movaps %xmm6, (%r8)
2472 ; SSE-NEXT: addq $600, %rsp # imm = 0x258
2473 ; SSE-NEXT: retq
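; Informal note on the SSE block above: the word constant [255,0,255,0,...] is the byte pattern 0xff,0,0,0 repeated,
; so pand keeps every fourth input byte and two packuswb rounds compress those bytes together for the first stream;
; roughly, the other three streams are gathered with punpck{l,h}bw against a zeroed register and pshufd/pshuflw/pshufhw
; shuffles, then packed back down with packuswb and shufps.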
2475 ; AVX-LABEL: load_i8_stride4_vf64:
2476 ; AVX: # %bb.0:
2477 ; AVX-NEXT: subq $328, %rsp # imm = 0x148
2478 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
2479 ; AVX-NEXT: vmovdqa 112(%rdi), %xmm2
2480 ; AVX-NEXT: vpshufb %xmm1, %xmm2, %xmm0
2481 ; AVX-NEXT: vmovdqa %xmm2, %xmm6
2482 ; AVX-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2483 ; AVX-NEXT: vmovdqa 96(%rdi), %xmm3
2484 ; AVX-NEXT: vpshufb %xmm1, %xmm3, %xmm2
2485 ; AVX-NEXT: vmovdqa %xmm3, %xmm8
2486 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2487 ; AVX-NEXT: vmovd {{.*#+}} xmm3 = [0,4,8,12,0,0,0,0,0,0,0,0,0,0,0,0]
2488 ; AVX-NEXT: vmovdqa 80(%rdi), %xmm0
2489 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2490 ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm5
2491 ; AVX-NEXT: vmovdqa 64(%rdi), %xmm0
2492 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2493 ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm7
2494 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
2495 ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
2496 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm9
2497 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2498 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2499 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm2
2500 ; AVX-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2501 ; AVX-NEXT: vmovdqa 32(%rdi), %xmm4
2502 ; AVX-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2503 ; AVX-NEXT: vmovdqa 48(%rdi), %xmm5
2504 ; AVX-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2505 ; AVX-NEXT: vpshufb %xmm1, %xmm5, %xmm10
2506 ; AVX-NEXT: vpshufb %xmm1, %xmm4, %xmm11
2507 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
2508 ; AVX-NEXT: vpshufb %xmm3, %xmm2, %xmm11
2509 ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm12
2510 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
2511 ; AVX-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3],xmm10[4,5,6,7]
2512 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm9[4,5,6,7]
2513 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2514 ; AVX-NEXT: vmovdqa 240(%rdi), %xmm0
2515 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2516 ; AVX-NEXT: vpshufb %xmm1, %xmm0, %xmm11
2517 ; AVX-NEXT: vmovdqa 224(%rdi), %xmm0
2518 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2519 ; AVX-NEXT: vpshufb %xmm1, %xmm0, %xmm12
2520 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
2521 ; AVX-NEXT: vmovdqa 208(%rdi), %xmm0
2522 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2523 ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm14
2524 ; AVX-NEXT: vmovdqa 192(%rdi), %xmm0
2525 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2526 ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm15
2527 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
2528 ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm13[4,5,6,7]
2529 ; AVX-NEXT: vmovdqa 176(%rdi), %xmm0
2530 ; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
2531 ; AVX-NEXT: vpshufb %xmm1, %xmm0, %xmm15
2532 ; AVX-NEXT: vmovdqa 160(%rdi), %xmm0
2533 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2534 ; AVX-NEXT: vpshufb %xmm1, %xmm0, %xmm1
2535 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1]
2536 ; AVX-NEXT: vmovdqa 144(%rdi), %xmm0
2537 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2538 ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0
2539 ; AVX-NEXT: vmovdqa 128(%rdi), %xmm4
2540 ; AVX-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2541 ; AVX-NEXT: vpshufb %xmm3, %xmm4, %xmm3
2542 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
2543 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
2544 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1
2545 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2546 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2547 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm0 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
2548 ; AVX-NEXT: vpshufb %xmm0, %xmm6, %xmm1
2549 ; AVX-NEXT: vmovdqa %xmm8, %xmm4
2550 ; AVX-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2551 ; AVX-NEXT: vpshufb %xmm0, %xmm8, %xmm2
2552 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2553 ; AVX-NEXT: vmovd {{.*#+}} xmm2 = [1,5,9,13,0,0,0,0,0,0,0,0,0,0,0,0]
2554 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2555 ; AVX-NEXT: vpshufb %xmm2, %xmm7, %xmm3
2556 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2557 ; AVX-NEXT: vpshufb %xmm2, %xmm8, %xmm5
2558 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2559 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
2560 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2561 ; AVX-NEXT: vpshufb %xmm0, %xmm12, %xmm3
2562 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
2563 ; AVX-NEXT: vpshufb %xmm0, %xmm11, %xmm5
2564 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2565 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2566 ; AVX-NEXT: vpshufb %xmm2, %xmm10, %xmm5
2567 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2568 ; AVX-NEXT: vpshufb %xmm2, %xmm9, %xmm6
2569 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
2570 ; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
2571 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2572 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
2573 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2574 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2575 ; AVX-NEXT: vpshufb %xmm0, %xmm13, %xmm1
2576 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2577 ; AVX-NEXT: vpshufb %xmm0, %xmm14, %xmm3
2578 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2579 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2580 ; AVX-NEXT: vpshufb %xmm2, %xmm15, %xmm3
2581 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2582 ; AVX-NEXT: vpshufb %xmm2, %xmm5, %xmm5
2583 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2584 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
2585 ; AVX-NEXT: vmovdqa (%rsp), %xmm3 # 16-byte Reload
2586 ; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm3
2587 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2588 ; AVX-NEXT: vpshufb %xmm0, %xmm5, %xmm0
2589 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
2590 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2591 ; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm3
2592 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2593 ; AVX-NEXT: vpshufb %xmm2, %xmm5, %xmm2
2594 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
2595 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
2596 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2597 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2598 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2599 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm0 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
2600 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2601 ; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm1
2602 ; AVX-NEXT: vpshufb %xmm0, %xmm4, %xmm2
2603 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2604 ; AVX-NEXT: vmovd {{.*#+}} xmm2 = [2,6,10,14,0,0,0,0,0,0,0,0,0,0,0,0]
2605 ; AVX-NEXT: vpshufb %xmm2, %xmm7, %xmm3
2606 ; AVX-NEXT: vpshufb %xmm2, %xmm8, %xmm5
2607 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2608 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
2609 ; AVX-NEXT: vpshufb %xmm0, %xmm12, %xmm3
2610 ; AVX-NEXT: vpshufb %xmm0, %xmm11, %xmm5
2611 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2612 ; AVX-NEXT: vpshufb %xmm2, %xmm10, %xmm5
2613 ; AVX-NEXT: vpshufb %xmm2, %xmm9, %xmm6
2614 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
2615 ; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
2616 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2617 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
2618 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2619 ; AVX-NEXT: vpshufb %xmm0, %xmm13, %xmm1
2620 ; AVX-NEXT: vpshufb %xmm0, %xmm14, %xmm3
2621 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
2622 ; AVX-NEXT: vpshufb %xmm2, %xmm15, %xmm3
2623 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2624 ; AVX-NEXT: vpshufb %xmm2, %xmm4, %xmm5
2625 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2626 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
2627 ; AVX-NEXT: vmovdqa (%rsp), %xmm7 # 16-byte Reload
2628 ; AVX-NEXT: vpshufb %xmm0, %xmm7, %xmm3
2629 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2630 ; AVX-NEXT: vpshufb %xmm0, %xmm8, %xmm0
2631 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
2632 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2633 ; AVX-NEXT: vpshufb %xmm2, %xmm9, %xmm3
2634 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2635 ; AVX-NEXT: vpshufb %xmm2, %xmm10, %xmm2
2636 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
2637 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
2638 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2639 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2640 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2641 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm0 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
2642 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2643 ; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm1
2644 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2645 ; AVX-NEXT: vpshufb %xmm0, %xmm2, %xmm2
2646 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2647 ; AVX-NEXT: vmovd {{.*#+}} xmm2 = [3,7,11,15,0,0,0,0,0,0,0,0,0,0,0,0]
2648 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2649 ; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm3
2650 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2651 ; AVX-NEXT: vpshufb %xmm2, %xmm5, %xmm5
2652 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2653 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
2654 ; AVX-NEXT: vpshufb %xmm0, %xmm12, %xmm3
2655 ; AVX-NEXT: vpshufb %xmm0, %xmm11, %xmm5
2656 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2657 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2658 ; AVX-NEXT: vpshufb %xmm2, %xmm5, %xmm5
2659 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2660 ; AVX-NEXT: vpshufb %xmm2, %xmm6, %xmm6
2661 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
2662 ; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
2663 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2664 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
2665 ; AVX-NEXT: vpshufb %xmm0, %xmm13, %xmm3
2666 ; AVX-NEXT: vpshufb %xmm0, %xmm14, %xmm5
2667 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
2668 ; AVX-NEXT: vpshufb %xmm2, %xmm15, %xmm5
2669 ; AVX-NEXT: vpshufb %xmm2, %xmm4, %xmm6
2670 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
2671 ; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
2672 ; AVX-NEXT: vpshufb %xmm0, %xmm7, %xmm5
2673 ; AVX-NEXT: vpshufb %xmm0, %xmm8, %xmm0
2674 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
2675 ; AVX-NEXT: vpshufb %xmm2, %xmm9, %xmm5
2676 ; AVX-NEXT: vpshufb %xmm2, %xmm10, %xmm2
2677 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
2678 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
2679 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm2
2680 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2681 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2682 ; AVX-NEXT: vmovaps %ymm2, 32(%rsi)
2683 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2684 ; AVX-NEXT: vmovaps %ymm2, (%rsi)
2685 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2686 ; AVX-NEXT: vmovaps %ymm2, 32(%rdx)
2687 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2688 ; AVX-NEXT: vmovaps %ymm2, (%rdx)
2689 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2690 ; AVX-NEXT: vmovaps %ymm2, 32(%rcx)
2691 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2692 ; AVX-NEXT: vmovaps %ymm2, (%rcx)
2693 ; AVX-NEXT: vmovaps %ymm0, 32(%r8)
2694 ; AVX-NEXT: vmovaps %ymm1, (%r8)
2695 ; AVX-NEXT: addq $328, %rsp # imm = 0x148
2696 ; AVX-NEXT: vzeroupper
2697 ; AVX-NEXT: retq
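; Informal note on the AVX block above: each 16-byte source is filtered with a vpshufb mask such as [0,4,8,12,...],
; pairs of partial results are joined with vpunpckldq and vpblendw, and vinsertf128/vblendps stitch the 32-byte outputs together.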
2699 ; AVX2-LABEL: load_i8_stride4_vf64:
2700 ; AVX2: # %bb.0:
2701 ; AVX2-NEXT: subq $168, %rsp
2702 ; AVX2-NEXT: vmovdqa 64(%rdi), %ymm0
2703 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2704 ; AVX2-NEXT: vmovdqa 96(%rdi), %ymm2
2705 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2706 ; AVX2-NEXT: vmovdqa (%rdi), %xmm4
2707 ; AVX2-NEXT: vmovdqa 16(%rdi), %xmm5
2708 ; AVX2-NEXT: vmovdqa 32(%rdi), %xmm12
2709 ; AVX2-NEXT: vmovdqa 48(%rdi), %xmm7
2710 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
2711 ; AVX2-NEXT: vpshufb %xmm3, %xmm7, %xmm1
2712 ; AVX2-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2713 ; AVX2-NEXT: vpshufb %xmm3, %xmm12, %xmm8
2714 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
2715 ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm13 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
2716 ; AVX2-NEXT: vpshufb %xmm13, %xmm5, %xmm8
2717 ; AVX2-NEXT: vpshufb %xmm13, %xmm4, %xmm9
2718 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
2719 ; AVX2-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm1[2,3]
2720 ; AVX2-NEXT: vpshufb %ymm13, %ymm2, %ymm9
2721 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [0,4,0,4,0,4,0,4]
2722 ; AVX2-NEXT: vpermd %ymm9, %ymm1, %ymm9
2723 ; AVX2-NEXT: vpshufb %ymm13, %ymm0, %ymm10
2724 ; AVX2-NEXT: vpermd %ymm10, %ymm1, %ymm10
2725 ; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3,4,5],ymm9[6,7]
2726 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm9[4,5,6,7]
2727 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2728 ; AVX2-NEXT: vmovdqa 176(%rdi), %xmm0
2729 ; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm10
2730 ; AVX2-NEXT: vmovdqa %xmm0, %xmm8
2731 ; AVX2-NEXT: vmovdqa 160(%rdi), %xmm0
2732 ; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm3
2733 ; AVX2-NEXT: vmovdqa %xmm0, %xmm9
2734 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
2735 ; AVX2-NEXT: vmovdqa 144(%rdi), %xmm0
2736 ; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2737 ; AVX2-NEXT: vpshufb %xmm13, %xmm0, %xmm10
2738 ; AVX2-NEXT: vmovdqa 128(%rdi), %xmm0
2739 ; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2740 ; AVX2-NEXT: vpshufb %xmm13, %xmm0, %xmm14
2741 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
2742 ; AVX2-NEXT: vmovdqa 224(%rdi), %ymm11
2743 ; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm14[0,1],xmm3[2,3]
2744 ; AVX2-NEXT: vpshufb %ymm13, %ymm11, %ymm14
2745 ; AVX2-NEXT: vpermd %ymm14, %ymm1, %ymm15
2746 ; AVX2-NEXT: vmovdqa 192(%rdi), %ymm0
2747 ; AVX2-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
2748 ; AVX2-NEXT: vpshufb %ymm13, %ymm0, %ymm13
2749 ; AVX2-NEXT: vpermd %ymm13, %ymm1, %ymm13
2750 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7]
2751 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm13[4,5,6,7]
2752 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2753 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
2754 ; AVX2-NEXT: vpshufb %xmm3, %xmm7, %xmm13
2755 ; AVX2-NEXT: vpshufb %xmm3, %xmm12, %xmm15
2756 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
2757 ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm15 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
2758 ; AVX2-NEXT: vpshufb %xmm15, %xmm5, %xmm0
2759 ; AVX2-NEXT: vmovdqa %xmm5, %xmm10
2760 ; AVX2-NEXT: vpshufb %xmm15, %xmm4, %xmm2
2761 ; AVX2-NEXT: vmovdqa %xmm4, %xmm14
2762 ; AVX2-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2763 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2764 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm13[2,3]
2765 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2766 ; AVX2-NEXT: vpshufb %ymm15, %ymm6, %ymm2
2767 ; AVX2-NEXT: vpermd %ymm2, %ymm1, %ymm2
2768 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2769 ; AVX2-NEXT: vpshufb %ymm15, %ymm5, %ymm13
2770 ; AVX2-NEXT: vpermd %ymm13, %ymm1, %ymm13
2771 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3,4,5],ymm2[6,7]
2772 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2773 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2774 ; AVX2-NEXT: vmovdqa %xmm8, %xmm4
2775 ; AVX2-NEXT: vpshufb %xmm3, %xmm8, %xmm0
2776 ; AVX2-NEXT: vmovdqa %xmm9, %xmm7
2777 ; AVX2-NEXT: vpshufb %xmm3, %xmm9, %xmm2
2778 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2779 ; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2780 ; AVX2-NEXT: vpshufb %xmm15, %xmm8, %xmm2
2781 ; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2782 ; AVX2-NEXT: vpshufb %xmm15, %xmm9, %xmm3
2783 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
2784 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
2785 ; AVX2-NEXT: vpshufb %ymm15, %ymm11, %ymm2
2786 ; AVX2-NEXT: vpermd %ymm2, %ymm1, %ymm2
2787 ; AVX2-NEXT: vmovdqu (%rsp), %ymm3 # 32-byte Reload
2788 ; AVX2-NEXT: vpshufb %ymm15, %ymm3, %ymm3
2789 ; AVX2-NEXT: vpermd %ymm3, %ymm1, %ymm3
2790 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
2791 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2792 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2793 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm0 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
2794 ; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2795 ; AVX2-NEXT: vpshufb %xmm0, %xmm2, %xmm2
2796 ; AVX2-NEXT: vpshufb %xmm0, %xmm12, %xmm3
2797 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
2798 ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
2799 ; AVX2-NEXT: vpshufb %xmm3, %xmm10, %xmm13
2800 ; AVX2-NEXT: vpshufb %xmm3, %xmm14, %xmm15
2801 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
2802 ; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm13[0,1],xmm2[2,3]
2803 ; AVX2-NEXT: vpshufb %ymm3, %ymm6, %ymm13
2804 ; AVX2-NEXT: vpermd %ymm13, %ymm1, %ymm13
2805 ; AVX2-NEXT: vpshufb %ymm3, %ymm5, %ymm15
2806 ; AVX2-NEXT: vpermd %ymm15, %ymm1, %ymm15
2807 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5],ymm13[6,7]
2808 ; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm13[4,5,6,7]
2809 ; AVX2-NEXT: vpshufb %xmm0, %xmm4, %xmm2
2810 ; AVX2-NEXT: vmovdqa %xmm4, %xmm14
2811 ; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm0
2812 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2813 ; AVX2-NEXT: vpshufb %xmm3, %xmm8, %xmm2
2814 ; AVX2-NEXT: vpshufb %xmm3, %xmm9, %xmm15
2815 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm15[0],xmm2[0],xmm15[1],xmm2[1]
2816 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
2817 ; AVX2-NEXT: vmovdqa %ymm11, %ymm15
2818 ; AVX2-NEXT: vpshufb %ymm3, %ymm11, %ymm2
2819 ; AVX2-NEXT: vpermd %ymm2, %ymm1, %ymm2
2820 ; AVX2-NEXT: vmovdqu (%rsp), %ymm11 # 32-byte Reload
2821 ; AVX2-NEXT: vpshufb %ymm3, %ymm11, %ymm3
2822 ; AVX2-NEXT: vpermd %ymm3, %ymm1, %ymm3
2823 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
2824 ; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2825 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm0 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
2826 ; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2827 ; AVX2-NEXT: vpshufb %xmm0, %xmm2, %xmm2
2828 ; AVX2-NEXT: vpshufb %xmm0, %xmm12, %xmm6
2829 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
2830 ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm6 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
2831 ; AVX2-NEXT: vpshufb %xmm6, %xmm10, %xmm5
2832 ; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2833 ; AVX2-NEXT: vpshufb %xmm6, %xmm4, %xmm4
2834 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
2835 ; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
2836 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2837 ; AVX2-NEXT: vpshufb %ymm6, %ymm4, %ymm4
2838 ; AVX2-NEXT: vpermd %ymm4, %ymm1, %ymm4
2839 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2840 ; AVX2-NEXT: vpshufb %ymm6, %ymm5, %ymm5
2841 ; AVX2-NEXT: vpermd %ymm5, %ymm1, %ymm5
2842 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
2843 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
2844 ; AVX2-NEXT: vpshufb %xmm0, %xmm14, %xmm4
2845 ; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm0
2846 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
2847 ; AVX2-NEXT: vpshufb %xmm6, %xmm8, %xmm4
2848 ; AVX2-NEXT: vpshufb %xmm6, %xmm9, %xmm5
2849 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
2850 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3]
2851 ; AVX2-NEXT: vpshufb %ymm6, %ymm15, %ymm4
2852 ; AVX2-NEXT: vpshufb %ymm6, %ymm11, %ymm5
2853 ; AVX2-NEXT: vpermd %ymm4, %ymm1, %ymm4
2854 ; AVX2-NEXT: vpermd %ymm5, %ymm1, %ymm1
2855 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
2856 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2857 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2858 ; AVX2-NEXT: vmovaps %ymm1, 32(%rsi)
2859 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2860 ; AVX2-NEXT: vmovaps %ymm1, (%rsi)
2861 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2862 ; AVX2-NEXT: vmovaps %ymm1, 32(%rdx)
2863 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2864 ; AVX2-NEXT: vmovaps %ymm1, (%rdx)
2865 ; AVX2-NEXT: vmovdqa %ymm3, 32(%rcx)
2866 ; AVX2-NEXT: vmovdqa %ymm13, (%rcx)
2867 ; AVX2-NEXT: vmovdqa %ymm0, 32(%r8)
2868 ; AVX2-NEXT: vmovdqa %ymm2, (%r8)
2869 ; AVX2-NEXT: addq $168, %rsp
2870 ; AVX2-NEXT: vzeroupper
2871 ; AVX2-NEXT: retq
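; Informal note on the AVX2 block above: the 256-bit loads are filtered per 128-bit lane with vpshufb, vpermd with the
; broadcast [0,4,0,4,...] index pattern gathers the surviving dwords across lanes, and vpblendd merges them with the
; results produced from the 16-byte loads.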
2873 ; AVX2-FP-LABEL: load_i8_stride4_vf64:
2874 ; AVX2-FP: # %bb.0:
2875 ; AVX2-FP-NEXT: subq $168, %rsp
2876 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm0
2877 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2878 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm2
2879 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2880 ; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm4
2881 ; AVX2-FP-NEXT: vmovdqa 16(%rdi), %xmm5
2882 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm12
2883 ; AVX2-FP-NEXT: vmovdqa 48(%rdi), %xmm7
2884 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm3 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
2885 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm7, %xmm1
2886 ; AVX2-FP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2887 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm12, %xmm8
2888 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
2889 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} ymm13 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
2890 ; AVX2-FP-NEXT: vpshufb %xmm13, %xmm5, %xmm8
2891 ; AVX2-FP-NEXT: vpshufb %xmm13, %xmm4, %xmm9
2892 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
2893 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm1[2,3]
2894 ; AVX2-FP-NEXT: vpshufb %ymm13, %ymm2, %ymm9
2895 ; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm1 = [0,4,0,4,0,4,0,4]
2896 ; AVX2-FP-NEXT: vpermd %ymm9, %ymm1, %ymm9
2897 ; AVX2-FP-NEXT: vpshufb %ymm13, %ymm0, %ymm10
2898 ; AVX2-FP-NEXT: vpermd %ymm10, %ymm1, %ymm10
2899 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3,4,5],ymm9[6,7]
2900 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm9[4,5,6,7]
2901 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2902 ; AVX2-FP-NEXT: vmovdqa 176(%rdi), %xmm0
2903 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm0, %xmm10
2904 ; AVX2-FP-NEXT: vmovdqa %xmm0, %xmm8
2905 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %xmm0
2906 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm0, %xmm3
2907 ; AVX2-FP-NEXT: vmovdqa %xmm0, %xmm9
2908 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
2909 ; AVX2-FP-NEXT: vmovdqa 144(%rdi), %xmm0
2910 ; AVX2-FP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2911 ; AVX2-FP-NEXT: vpshufb %xmm13, %xmm0, %xmm10
2912 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %xmm0
2913 ; AVX2-FP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2914 ; AVX2-FP-NEXT: vpshufb %xmm13, %xmm0, %xmm14
2915 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
2916 ; AVX2-FP-NEXT: vmovdqa 224(%rdi), %ymm11
2917 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm3 = xmm14[0,1],xmm3[2,3]
2918 ; AVX2-FP-NEXT: vpshufb %ymm13, %ymm11, %ymm14
2919 ; AVX2-FP-NEXT: vpermd %ymm14, %ymm1, %ymm15
2920 ; AVX2-FP-NEXT: vmovdqa 192(%rdi), %ymm0
2921 ; AVX2-FP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
2922 ; AVX2-FP-NEXT: vpshufb %ymm13, %ymm0, %ymm13
2923 ; AVX2-FP-NEXT: vpermd %ymm13, %ymm1, %ymm13
2924 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7]
2925 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm13[4,5,6,7]
2926 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2927 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
2928 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm7, %xmm13
2929 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm12, %xmm15
2930 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
2931 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} ymm15 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
2932 ; AVX2-FP-NEXT: vpshufb %xmm15, %xmm5, %xmm0
2933 ; AVX2-FP-NEXT: vmovdqa %xmm5, %xmm10
2934 ; AVX2-FP-NEXT: vpshufb %xmm15, %xmm4, %xmm2
2935 ; AVX2-FP-NEXT: vmovdqa %xmm4, %xmm14
2936 ; AVX2-FP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2937 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2938 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm13[2,3]
2939 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2940 ; AVX2-FP-NEXT: vpshufb %ymm15, %ymm6, %ymm2
2941 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm1, %ymm2
2942 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
2943 ; AVX2-FP-NEXT: vpshufb %ymm15, %ymm5, %ymm13
2944 ; AVX2-FP-NEXT: vpermd %ymm13, %ymm1, %ymm13
2945 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3,4,5],ymm2[6,7]
2946 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2947 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2948 ; AVX2-FP-NEXT: vmovdqa %xmm8, %xmm4
2949 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm8, %xmm0
2950 ; AVX2-FP-NEXT: vmovdqa %xmm9, %xmm7
2951 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm9, %xmm2
2952 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
2953 ; AVX2-FP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2954 ; AVX2-FP-NEXT: vpshufb %xmm15, %xmm8, %xmm2
2955 ; AVX2-FP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2956 ; AVX2-FP-NEXT: vpshufb %xmm15, %xmm9, %xmm3
2957 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
2958 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
2959 ; AVX2-FP-NEXT: vpshufb %ymm15, %ymm11, %ymm2
2960 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm1, %ymm2
2961 ; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm3 # 32-byte Reload
2962 ; AVX2-FP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
2963 ; AVX2-FP-NEXT: vpermd %ymm3, %ymm1, %ymm3
2964 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
2965 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2966 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2967 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm0 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
2968 ; AVX2-FP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2969 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm2, %xmm2
2970 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm12, %xmm3
2971 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
2972 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} ymm3 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
2973 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm10, %xmm13
2974 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm14, %xmm15
2975 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
2976 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm2 = xmm13[0,1],xmm2[2,3]
2977 ; AVX2-FP-NEXT: vpshufb %ymm3, %ymm6, %ymm13
2978 ; AVX2-FP-NEXT: vpermd %ymm13, %ymm1, %ymm13
2979 ; AVX2-FP-NEXT: vpshufb %ymm3, %ymm5, %ymm15
2980 ; AVX2-FP-NEXT: vpermd %ymm15, %ymm1, %ymm15
2981 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5],ymm13[6,7]
2982 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm13[4,5,6,7]
2983 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm4, %xmm2
2984 ; AVX2-FP-NEXT: vmovdqa %xmm4, %xmm14
2985 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm7, %xmm0
2986 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2987 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm8, %xmm2
2988 ; AVX2-FP-NEXT: vpshufb %xmm3, %xmm9, %xmm15
2989 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm15[0],xmm2[0],xmm15[1],xmm2[1]
2990 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
2991 ; AVX2-FP-NEXT: vmovdqa %ymm11, %ymm15
2992 ; AVX2-FP-NEXT: vpshufb %ymm3, %ymm11, %ymm2
2993 ; AVX2-FP-NEXT: vpermd %ymm2, %ymm1, %ymm2
2994 ; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm11 # 32-byte Reload
2995 ; AVX2-FP-NEXT: vpshufb %ymm3, %ymm11, %ymm3
2996 ; AVX2-FP-NEXT: vpermd %ymm3, %ymm1, %ymm3
2997 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
2998 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2999 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} xmm0 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
3000 ; AVX2-FP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3001 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm2, %xmm2
3002 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm12, %xmm6
3003 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
3004 ; AVX2-FP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
3005 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm10, %xmm5
3006 ; AVX2-FP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3007 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm4, %xmm4
3008 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
3009 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
3010 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3011 ; AVX2-FP-NEXT: vpshufb %ymm6, %ymm4, %ymm4
3012 ; AVX2-FP-NEXT: vpermd %ymm4, %ymm1, %ymm4
3013 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3014 ; AVX2-FP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
3015 ; AVX2-FP-NEXT: vpermd %ymm5, %ymm1, %ymm5
3016 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
3017 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
3018 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm14, %xmm4
3019 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm7, %xmm0
3020 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
3021 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm8, %xmm4
3022 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm9, %xmm5
3023 ; AVX2-FP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
3024 ; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3]
3025 ; AVX2-FP-NEXT: vpshufb %ymm6, %ymm15, %ymm4
3026 ; AVX2-FP-NEXT: vpshufb %ymm6, %ymm11, %ymm5
3027 ; AVX2-FP-NEXT: vpermd %ymm4, %ymm1, %ymm4
3028 ; AVX2-FP-NEXT: vpermd %ymm5, %ymm1, %ymm1
3029 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
3030 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3031 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3032 ; AVX2-FP-NEXT: vmovaps %ymm1, 32(%rsi)
3033 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3034 ; AVX2-FP-NEXT: vmovaps %ymm1, (%rsi)
3035 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3036 ; AVX2-FP-NEXT: vmovaps %ymm1, 32(%rdx)
3037 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3038 ; AVX2-FP-NEXT: vmovaps %ymm1, (%rdx)
3039 ; AVX2-FP-NEXT: vmovdqa %ymm3, 32(%rcx)
3040 ; AVX2-FP-NEXT: vmovdqa %ymm13, (%rcx)
3041 ; AVX2-FP-NEXT: vmovdqa %ymm0, 32(%r8)
3042 ; AVX2-FP-NEXT: vmovdqa %ymm2, (%r8)
3043 ; AVX2-FP-NEXT: addq $168, %rsp
3044 ; AVX2-FP-NEXT: vzeroupper
3045 ; AVX2-FP-NEXT: retq
3046 ;
3047 ; AVX2-FCP-LABEL: load_i8_stride4_vf64:
3048 ; AVX2-FCP: # %bb.0:
3049 ; AVX2-FCP-NEXT: subq $168, %rsp
3050 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
3051 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3052 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm2
3053 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3054 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm4
3055 ; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm5
3056 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm12
3057 ; AVX2-FCP-NEXT: vmovdqa 48(%rdi), %xmm7
3058 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm3 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
3059 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm1
3060 ; AVX2-FCP-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3061 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm12, %xmm8
3062 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
3063 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm13 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
3064 ; AVX2-FCP-NEXT: vpshufb %xmm13, %xmm5, %xmm8
3065 ; AVX2-FCP-NEXT: vpshufb %xmm13, %xmm4, %xmm9
3066 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
3067 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm1[2,3]
3068 ; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm2, %ymm9
3069 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm1 = [0,4,0,4,0,4,0,4]
3070 ; AVX2-FCP-NEXT: vpermd %ymm9, %ymm1, %ymm9
3071 ; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm0, %ymm10
3072 ; AVX2-FCP-NEXT: vpermd %ymm10, %ymm1, %ymm10
3073 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3,4,5],ymm9[6,7]
3074 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm9[4,5,6,7]
3075 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3076 ; AVX2-FCP-NEXT: vmovdqa 176(%rdi), %xmm0
3077 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm10
3078 ; AVX2-FCP-NEXT: vmovdqa %xmm0, %xmm8
3079 ; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %xmm0
3080 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm3
3081 ; AVX2-FCP-NEXT: vmovdqa %xmm0, %xmm9
3082 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
3083 ; AVX2-FCP-NEXT: vmovdqa 144(%rdi), %xmm0
3084 ; AVX2-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3085 ; AVX2-FCP-NEXT: vpshufb %xmm13, %xmm0, %xmm10
3086 ; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %xmm0
3087 ; AVX2-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3088 ; AVX2-FCP-NEXT: vpshufb %xmm13, %xmm0, %xmm14
3089 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
3090 ; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm11
3091 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm14[0,1],xmm3[2,3]
3092 ; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm11, %ymm14
3093 ; AVX2-FCP-NEXT: vpermd %ymm14, %ymm1, %ymm15
3094 ; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm0
3095 ; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
3096 ; AVX2-FCP-NEXT: vpshufb %ymm13, %ymm0, %ymm13
3097 ; AVX2-FCP-NEXT: vpermd %ymm13, %ymm1, %ymm13
3098 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7]
3099 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm13[4,5,6,7]
3100 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3101 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
3102 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm13
3103 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm12, %xmm15
3104 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
3105 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm15 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
3106 ; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm5, %xmm0
3107 ; AVX2-FCP-NEXT: vmovdqa %xmm5, %xmm10
3108 ; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm4, %xmm2
3109 ; AVX2-FCP-NEXT: vmovdqa %xmm4, %xmm14
3110 ; AVX2-FCP-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3111 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
3112 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm13[2,3]
3113 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3114 ; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm6, %ymm2
3115 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm2
3116 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3117 ; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm5, %ymm13
3118 ; AVX2-FCP-NEXT: vpermd %ymm13, %ymm1, %ymm13
3119 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3,4,5],ymm2[6,7]
3120 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
3121 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3122 ; AVX2-FCP-NEXT: vmovdqa %xmm8, %xmm4
3123 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm8, %xmm0
3124 ; AVX2-FCP-NEXT: vmovdqa %xmm9, %xmm7
3125 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm2
3126 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
3127 ; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
3128 ; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm8, %xmm2
3129 ; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
3130 ; AVX2-FCP-NEXT: vpshufb %xmm15, %xmm9, %xmm3
3131 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
3132 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
3133 ; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm11, %ymm2
3134 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm2
3135 ; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm3 # 32-byte Reload
3136 ; AVX2-FCP-NEXT: vpshufb %ymm15, %ymm3, %ymm3
3137 ; AVX2-FCP-NEXT: vpermd %ymm3, %ymm1, %ymm3
3138 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
3139 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
3140 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3141 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm0 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
3142 ; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3143 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm2
3144 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm12, %xmm3
3145 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
3146 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm3 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
3147 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm10, %xmm13
3148 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm14, %xmm15
3149 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
3150 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm13[0,1],xmm2[2,3]
3151 ; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm6, %ymm13
3152 ; AVX2-FCP-NEXT: vpermd %ymm13, %ymm1, %ymm13
3153 ; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm5, %ymm15
3154 ; AVX2-FCP-NEXT: vpermd %ymm15, %ymm1, %ymm15
3155 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5],ymm13[6,7]
3156 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm13[4,5,6,7]
3157 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm4, %xmm2
3158 ; AVX2-FCP-NEXT: vmovdqa %xmm4, %xmm14
3159 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm0
3160 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
3161 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm8, %xmm2
3162 ; AVX2-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm15
3163 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm15[0],xmm2[0],xmm15[1],xmm2[1]
3164 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
3165 ; AVX2-FCP-NEXT: vmovdqa %ymm11, %ymm15
3166 ; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm11, %ymm2
3167 ; AVX2-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm2
3168 ; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm11 # 32-byte Reload
3169 ; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm11, %ymm3
3170 ; AVX2-FCP-NEXT: vpermd %ymm3, %ymm1, %ymm3
3171 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
3172 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm2[4,5,6,7]
3173 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} xmm0 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
3174 ; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3175 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm2
3176 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm12, %xmm6
3177 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
3178 ; AVX2-FCP-NEXT: vpbroadcastd {{.*#+}} ymm6 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
3179 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm10, %xmm5
3180 ; AVX2-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3181 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm4, %xmm4
3182 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
3183 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
3184 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
3185 ; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm4, %ymm4
3186 ; AVX2-FCP-NEXT: vpermd %ymm4, %ymm1, %ymm4
3187 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3188 ; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
3189 ; AVX2-FCP-NEXT: vpermd %ymm5, %ymm1, %ymm5
3190 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
3191 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
3192 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm14, %xmm4
3193 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm0
3194 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
3195 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm8, %xmm4
3196 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm9, %xmm5
3197 ; AVX2-FCP-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
3198 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3]
3199 ; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm15, %ymm4
3200 ; AVX2-FCP-NEXT: vpshufb %ymm6, %ymm11, %ymm5
3201 ; AVX2-FCP-NEXT: vpermd %ymm4, %ymm1, %ymm4
3202 ; AVX2-FCP-NEXT: vpermd %ymm5, %ymm1, %ymm1
3203 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
3204 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3205 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3206 ; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rsi)
3207 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3208 ; AVX2-FCP-NEXT: vmovaps %ymm1, (%rsi)
3209 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3210 ; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%rdx)
3211 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3212 ; AVX2-FCP-NEXT: vmovaps %ymm1, (%rdx)
3213 ; AVX2-FCP-NEXT: vmovdqa %ymm3, 32(%rcx)
3214 ; AVX2-FCP-NEXT: vmovdqa %ymm13, (%rcx)
3215 ; AVX2-FCP-NEXT: vmovdqa %ymm0, 32(%r8)
3216 ; AVX2-FCP-NEXT: vmovdqa %ymm2, (%r8)
3217 ; AVX2-FCP-NEXT: addq $168, %rsp
3218 ; AVX2-FCP-NEXT: vzeroupper
3219 ; AVX2-FCP-NEXT: retq
3220 ;
3221 ; AVX512-LABEL: load_i8_stride4_vf64:
3222 ; AVX512: # %bb.0:
3223 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
3224 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2
3225 ; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
3226 ; AVX512-NEXT: vmovdqa 224(%rdi), %ymm3
3227 ; AVX512-NEXT: vpshufb %ymm7, %ymm3, %ymm5
3228 ; AVX512-NEXT: vmovdqa 192(%rdi), %ymm4
3229 ; AVX512-NEXT: vpshufb %ymm7, %ymm4, %ymm6
3230 ; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,4,0,4,0,4,8,12]
3231 ; AVX512-NEXT: vpermt2d %ymm5, %ymm1, %ymm6
3232 ; AVX512-NEXT: vpmovdb %zmm2, %xmm5
3233 ; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm6[4,5,6,7]
3234 ; AVX512-NEXT: vmovdqa 96(%rdi), %ymm5
3235 ; AVX512-NEXT: vpshufb %ymm7, %ymm5, %ymm9
3236 ; AVX512-NEXT: vmovdqa 64(%rdi), %ymm6
3237 ; AVX512-NEXT: vpshufb %ymm7, %ymm6, %ymm7
3238 ; AVX512-NEXT: vpermt2d %ymm9, %ymm1, %ymm7
3239 ; AVX512-NEXT: vpmovdb %zmm0, %xmm9
3240 ; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
3241 ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm8[0,1,2,3]
3242 ; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
3243 ; AVX512-NEXT: vpshufb %ymm8, %ymm3, %ymm9
3244 ; AVX512-NEXT: vpshufb %ymm8, %ymm4, %ymm10
3245 ; AVX512-NEXT: vpermt2d %ymm9, %ymm1, %ymm10
3246 ; AVX512-NEXT: vpsrld $8, %zmm2, %zmm9
3247 ; AVX512-NEXT: vpmovdb %zmm9, %xmm9
3248 ; AVX512-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
3249 ; AVX512-NEXT: vpshufb %ymm8, %ymm5, %ymm10
3250 ; AVX512-NEXT: vpshufb %ymm8, %ymm6, %ymm8
3251 ; AVX512-NEXT: vpermt2d %ymm10, %ymm1, %ymm8
3252 ; AVX512-NEXT: vpsrld $8, %zmm0, %zmm10
3253 ; AVX512-NEXT: vpmovdb %zmm10, %xmm10
3254 ; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
3255 ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm9[0,1,2,3]
3256 ; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
3257 ; AVX512-NEXT: vpshufb %ymm9, %ymm3, %ymm10
3258 ; AVX512-NEXT: vpshufb %ymm9, %ymm4, %ymm11
3259 ; AVX512-NEXT: vpermt2d %ymm10, %ymm1, %ymm11
3260 ; AVX512-NEXT: vpsrld $16, %zmm2, %zmm10
3261 ; AVX512-NEXT: vpmovdb %zmm10, %xmm10
3262 ; AVX512-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
3263 ; AVX512-NEXT: vpshufb %ymm9, %ymm5, %ymm11
3264 ; AVX512-NEXT: vpshufb %ymm9, %ymm6, %ymm9
3265 ; AVX512-NEXT: vpermt2d %ymm11, %ymm1, %ymm9
3266 ; AVX512-NEXT: vpsrld $16, %zmm0, %zmm11
3267 ; AVX512-NEXT: vpmovdb %zmm11, %xmm11
3268 ; AVX512-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
3269 ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm10[0,1,2,3]
3270 ; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm10 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
3271 ; AVX512-NEXT: vpshufb %ymm10, %ymm3, %ymm3
3272 ; AVX512-NEXT: vpshufb %ymm10, %ymm4, %ymm4
3273 ; AVX512-NEXT: vpermt2d %ymm3, %ymm1, %ymm4
3274 ; AVX512-NEXT: vpsrld $24, %zmm2, %zmm2
3275 ; AVX512-NEXT: vpmovdb %zmm2, %xmm2
3276 ; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
3277 ; AVX512-NEXT: vpshufb %ymm10, %ymm5, %ymm3
3278 ; AVX512-NEXT: vpshufb %ymm10, %ymm6, %ymm4
3279 ; AVX512-NEXT: vpermt2d %ymm3, %ymm1, %ymm4
3280 ; AVX512-NEXT: vpsrld $24, %zmm0, %zmm0
3281 ; AVX512-NEXT: vpmovdb %zmm0, %xmm0
3282 ; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
3283 ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
3284 ; AVX512-NEXT: vmovdqa64 %zmm7, (%rsi)
3285 ; AVX512-NEXT: vmovdqa64 %zmm8, (%rdx)
3286 ; AVX512-NEXT: vmovdqa64 %zmm9, (%rcx)
3287 ; AVX512-NEXT: vmovdqa64 %zmm0, (%r8)
3288 ; AVX512-NEXT: vzeroupper
3289 ; AVX512-NEXT: retq
3290 ;
3291 ; AVX512-FCP-LABEL: load_i8_stride4_vf64:
3292 ; AVX512-FCP: # %bb.0:
3293 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
3294 ; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
3295 ; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
3296 ; AVX512-FCP-NEXT: vmovdqa 224(%rdi), %ymm3
3297 ; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm5
3298 ; AVX512-FCP-NEXT: vmovdqa 192(%rdi), %ymm4
3299 ; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm6
3300 ; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,4,0,4,0,4,8,12]
3301 ; AVX512-FCP-NEXT: vpermt2d %ymm5, %ymm1, %ymm6
3302 ; AVX512-FCP-NEXT: vpmovdb %zmm2, %xmm5
3303 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm6[4,5,6,7]
3304 ; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %ymm5
3305 ; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm9
3306 ; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm6
3307 ; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm6, %ymm7
3308 ; AVX512-FCP-NEXT: vpermt2d %ymm9, %ymm1, %ymm7
3309 ; AVX512-FCP-NEXT: vpmovdb %zmm0, %xmm9
3310 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
3311 ; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm8[0,1,2,3]
3312 ; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
3313 ; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm9
3314 ; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm10
3315 ; AVX512-FCP-NEXT: vpermt2d %ymm9, %ymm1, %ymm10
3316 ; AVX512-FCP-NEXT: vpsrld $8, %zmm2, %zmm9
3317 ; AVX512-FCP-NEXT: vpmovdb %zmm9, %xmm9
3318 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
3319 ; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm10
3320 ; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm8
3321 ; AVX512-FCP-NEXT: vpermt2d %ymm10, %ymm1, %ymm8
3322 ; AVX512-FCP-NEXT: vpsrld $8, %zmm0, %zmm10
3323 ; AVX512-FCP-NEXT: vpmovdb %zmm10, %xmm10
3324 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
3325 ; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm9[0,1,2,3]
3326 ; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
3327 ; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm10
3328 ; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm4, %ymm11
3329 ; AVX512-FCP-NEXT: vpermt2d %ymm10, %ymm1, %ymm11
3330 ; AVX512-FCP-NEXT: vpsrld $16, %zmm2, %zmm10
3331 ; AVX512-FCP-NEXT: vpmovdb %zmm10, %xmm10
3332 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
3333 ; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm11
3334 ; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm6, %ymm9
3335 ; AVX512-FCP-NEXT: vpermt2d %ymm11, %ymm1, %ymm9
3336 ; AVX512-FCP-NEXT: vpsrld $16, %zmm0, %zmm11
3337 ; AVX512-FCP-NEXT: vpmovdb %zmm11, %xmm11
3338 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
3339 ; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm10[0,1,2,3]
3340 ; AVX512-FCP-NEXT: vpbroadcastd {{.*#+}} ymm10 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
3341 ; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
3342 ; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm4
3343 ; AVX512-FCP-NEXT: vpermt2d %ymm3, %ymm1, %ymm4
3344 ; AVX512-FCP-NEXT: vpsrld $24, %zmm2, %zmm2
3345 ; AVX512-FCP-NEXT: vpmovdb %zmm2, %xmm2
3346 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
3347 ; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm3
3348 ; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm4
3349 ; AVX512-FCP-NEXT: vpermt2d %ymm3, %ymm1, %ymm4
3350 ; AVX512-FCP-NEXT: vpsrld $24, %zmm0, %zmm0
3351 ; AVX512-FCP-NEXT: vpmovdb %zmm0, %xmm0
3352 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
3353 ; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
3354 ; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%rsi)
3355 ; AVX512-FCP-NEXT: vmovdqa64 %zmm8, (%rdx)
3356 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, (%rcx)
3357 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%r8)
3358 ; AVX512-FCP-NEXT: vzeroupper
3359 ; AVX512-FCP-NEXT: retq
3360 ;
3361 ; AVX512DQ-LABEL: load_i8_stride4_vf64:
3362 ; AVX512DQ: # %bb.0:
3363 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
3364 ; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm2
3365 ; AVX512DQ-NEXT: vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
3366 ; AVX512DQ-NEXT: vmovdqa 224(%rdi), %ymm3
3367 ; AVX512DQ-NEXT: vpshufb %ymm7, %ymm3, %ymm5
3368 ; AVX512DQ-NEXT: vmovdqa 192(%rdi), %ymm4
3369 ; AVX512DQ-NEXT: vpshufb %ymm7, %ymm4, %ymm6
3370 ; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,4,0,4,0,4,8,12]
3371 ; AVX512DQ-NEXT: vpermt2d %ymm5, %ymm1, %ymm6
3372 ; AVX512DQ-NEXT: vpmovdb %zmm2, %xmm5
3373 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm6[4,5,6,7]
3374 ; AVX512DQ-NEXT: vmovdqa 96(%rdi), %ymm5
3375 ; AVX512DQ-NEXT: vpshufb %ymm7, %ymm5, %ymm9
3376 ; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm6
3377 ; AVX512DQ-NEXT: vpshufb %ymm7, %ymm6, %ymm7
3378 ; AVX512DQ-NEXT: vpermt2d %ymm9, %ymm1, %ymm7
3379 ; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm9
3380 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
3381 ; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm8[0,1,2,3]
3382 ; AVX512DQ-NEXT: vpbroadcastd {{.*#+}} ymm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
3383 ; AVX512DQ-NEXT: vpshufb %ymm8, %ymm3, %ymm9
3384 ; AVX512DQ-NEXT: vpshufb %ymm8, %ymm4, %ymm10
3385 ; AVX512DQ-NEXT: vpermt2d %ymm9, %ymm1, %ymm10
3386 ; AVX512DQ-NEXT: vpsrld $8, %zmm2, %zmm9
3387 ; AVX512DQ-NEXT: vpmovdb %zmm9, %xmm9
3388 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
3389 ; AVX512DQ-NEXT: vpshufb %ymm8, %ymm5, %ymm10
3390 ; AVX512DQ-NEXT: vpshufb %ymm8, %ymm6, %ymm8
3391 ; AVX512DQ-NEXT: vpermt2d %ymm10, %ymm1, %ymm8
3392 ; AVX512DQ-NEXT: vpsrld $8, %zmm0, %zmm10
3393 ; AVX512DQ-NEXT: vpmovdb %zmm10, %xmm10
3394 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
3395 ; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm9[0,1,2,3]
3396 ; AVX512DQ-NEXT: vpbroadcastd {{.*#+}} ymm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
3397 ; AVX512DQ-NEXT: vpshufb %ymm9, %ymm3, %ymm10
3398 ; AVX512DQ-NEXT: vpshufb %ymm9, %ymm4, %ymm11
3399 ; AVX512DQ-NEXT: vpermt2d %ymm10, %ymm1, %ymm11
3400 ; AVX512DQ-NEXT: vpsrld $16, %zmm2, %zmm10
3401 ; AVX512DQ-NEXT: vpmovdb %zmm10, %xmm10
3402 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
3403 ; AVX512DQ-NEXT: vpshufb %ymm9, %ymm5, %ymm11
3404 ; AVX512DQ-NEXT: vpshufb %ymm9, %ymm6, %ymm9
3405 ; AVX512DQ-NEXT: vpermt2d %ymm11, %ymm1, %ymm9
3406 ; AVX512DQ-NEXT: vpsrld $16, %zmm0, %zmm11
3407 ; AVX512DQ-NEXT: vpmovdb %zmm11, %xmm11
3408 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
3409 ; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm10[0,1,2,3]
3410 ; AVX512DQ-NEXT: vpbroadcastd {{.*#+}} ymm10 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
3411 ; AVX512DQ-NEXT: vpshufb %ymm10, %ymm3, %ymm3
3412 ; AVX512DQ-NEXT: vpshufb %ymm10, %ymm4, %ymm4
3413 ; AVX512DQ-NEXT: vpermt2d %ymm3, %ymm1, %ymm4
3414 ; AVX512DQ-NEXT: vpsrld $24, %zmm2, %zmm2
3415 ; AVX512DQ-NEXT: vpmovdb %zmm2, %xmm2
3416 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
3417 ; AVX512DQ-NEXT: vpshufb %ymm10, %ymm5, %ymm3
3418 ; AVX512DQ-NEXT: vpshufb %ymm10, %ymm6, %ymm4
3419 ; AVX512DQ-NEXT: vpermt2d %ymm3, %ymm1, %ymm4
3420 ; AVX512DQ-NEXT: vpsrld $24, %zmm0, %zmm0
3421 ; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
3422 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
3423 ; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
3424 ; AVX512DQ-NEXT: vmovdqa64 %zmm7, (%rsi)
3425 ; AVX512DQ-NEXT: vmovdqa64 %zmm8, (%rdx)
3426 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, (%rcx)
3427 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%r8)
3428 ; AVX512DQ-NEXT: vzeroupper
3429 ; AVX512DQ-NEXT: retq
3430 ;
3431 ; AVX512DQ-FCP-LABEL: load_i8_stride4_vf64:
3432 ; AVX512DQ-FCP: # %bb.0:
3433 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
3434 ; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
3435 ; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
3436 ; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %ymm3
3437 ; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm5
3438 ; AVX512DQ-FCP-NEXT: vmovdqa 192(%rdi), %ymm4
3439 ; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm6
3440 ; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,4,0,4,0,4,8,12]
3441 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm5, %ymm1, %ymm6
3442 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm2, %xmm5
3443 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm6[4,5,6,7]
3444 ; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %ymm5
3445 ; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm9
3446 ; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm6
3447 ; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm6, %ymm7
3448 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm9, %ymm1, %ymm7
3449 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm0, %xmm9
3450 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
3451 ; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm8[0,1,2,3]
3452 ; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13]
3453 ; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm3, %ymm9
3454 ; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm10
3455 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm9, %ymm1, %ymm10
3456 ; AVX512DQ-FCP-NEXT: vpsrld $8, %zmm2, %zmm9
3457 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm9, %xmm9
3458 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
3459 ; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm10
3460 ; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm8
3461 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm10, %ymm1, %ymm8
3462 ; AVX512DQ-FCP-NEXT: vpsrld $8, %zmm0, %zmm10
3463 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm10, %xmm10
3464 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
3465 ; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm9[0,1,2,3]
3466 ; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14]
3467 ; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm3, %ymm10
3468 ; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm4, %ymm11
3469 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm10, %ymm1, %ymm11
3470 ; AVX512DQ-FCP-NEXT: vpsrld $16, %zmm2, %zmm10
3471 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm10, %xmm10
3472 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
3473 ; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm11
3474 ; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm6, %ymm9
3475 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm11, %ymm1, %ymm9
3476 ; AVX512DQ-FCP-NEXT: vpsrld $16, %zmm0, %zmm11
3477 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm11, %xmm11
3478 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
3479 ; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm10[0,1,2,3]
3480 ; AVX512DQ-FCP-NEXT: vpbroadcastd {{.*#+}} ymm10 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15]
3481 ; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm3, %ymm3
3482 ; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm4, %ymm4
3483 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm3, %ymm1, %ymm4
3484 ; AVX512DQ-FCP-NEXT: vpsrld $24, %zmm2, %zmm2
3485 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm2, %xmm2
3486 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
3487 ; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm3
3488 ; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm4
3489 ; AVX512DQ-FCP-NEXT: vpermt2d %ymm3, %ymm1, %ymm4
3490 ; AVX512DQ-FCP-NEXT: vpsrld $24, %zmm0, %zmm0
3491 ; AVX512DQ-FCP-NEXT: vpmovdb %zmm0, %xmm0
3492 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
3493 ; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
3494 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%rsi)
3495 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, (%rdx)
3496 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, (%rcx)
3497 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%r8)
3498 ; AVX512DQ-FCP-NEXT: vzeroupper
3499 ; AVX512DQ-FCP-NEXT: retq
3500 ;
3501 ; AVX512BW-LABEL: load_i8_stride4_vf64:
3502 ; AVX512BW: # %bb.0:
3503 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
3504 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
3505 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm2
3506 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm3
3507 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,0,4,8,12,u,u,u,u,u,u,u,u,128,128,128,128,16,20,24,28,u,u,u,u,u,u,u,u,128,128,128,128,32,36,40,44,u,u,u,u,u,u,u,u,128,128,128,128,48,52,56,60,u,u,u,u,u,u,u,u]
3508 ; AVX512BW-NEXT: vpshufb %zmm4, %zmm3, %zmm5
3509 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,4,8,12,128,128,128,128,u,u,u,u,u,u,u,u,16,20,24,28,128,128,128,128,u,u,u,u,u,u,u,u,32,36,40,44,128,128,128,128,u,u,u,u,u,u,u,u,48,52,56,60,128,128,128,128,u,u,u,u,u,u,u,u]
3510 ; AVX512BW-NEXT: vpshufb %zmm6, %zmm2, %zmm7
3511 ; AVX512BW-NEXT: vporq %zmm5, %zmm7, %zmm5
3512 ; AVX512BW-NEXT: vpshufb %zmm4, %zmm1, %zmm4
3513 ; AVX512BW-NEXT: vpshufb %zmm6, %zmm0, %zmm6
3514 ; AVX512BW-NEXT: vporq %zmm4, %zmm6, %zmm4
3515 ; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,4,8,12,1,5,9,13,16,20,24,28,17,21,25,29]
3516 ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm6, %zmm4
3517 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = [128,128,128,128,1,5,9,13,u,u,u,u,u,u,u,u,128,128,128,128,17,21,25,29,u,u,u,u,u,u,u,u,128,128,128,128,33,37,41,45,u,u,u,u,u,u,u,u,128,128,128,128,49,53,57,61,u,u,u,u,u,u,u,u]
3518 ; AVX512BW-NEXT: vpshufb %zmm5, %zmm3, %zmm7
3519 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [1,5,9,13,128,128,128,128,u,u,u,u,u,u,u,u,17,21,25,29,128,128,128,128,u,u,u,u,u,u,u,u,33,37,41,45,128,128,128,128,u,u,u,u,u,u,u,u,49,53,57,61,128,128,128,128,u,u,u,u,u,u,u,u]
3520 ; AVX512BW-NEXT: vpshufb %zmm8, %zmm2, %zmm9
3521 ; AVX512BW-NEXT: vporq %zmm7, %zmm9, %zmm7
3522 ; AVX512BW-NEXT: vpshufb %zmm5, %zmm1, %zmm5
3523 ; AVX512BW-NEXT: vpshufb %zmm8, %zmm0, %zmm8
3524 ; AVX512BW-NEXT: vporq %zmm5, %zmm8, %zmm5
3525 ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm6, %zmm5
3526 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [128,128,128,128,2,6,10,14,u,u,u,u,u,u,u,u,128,128,128,128,18,22,26,30,u,u,u,u,u,u,u,u,128,128,128,128,34,38,42,46,u,u,u,u,u,u,u,u,128,128,128,128,50,54,58,62,u,u,u,u,u,u,u,u]
3527 ; AVX512BW-NEXT: vpshufb %zmm7, %zmm3, %zmm8
3528 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = [2,6,10,14,128,128,128,128,u,u,u,u,u,u,u,u,18,22,26,30,128,128,128,128,u,u,u,u,u,u,u,u,34,38,42,46,128,128,128,128,u,u,u,u,u,u,u,u,50,54,58,62,128,128,128,128,u,u,u,u,u,u,u,u]
3529 ; AVX512BW-NEXT: vpshufb %zmm9, %zmm2, %zmm10
3530 ; AVX512BW-NEXT: vporq %zmm8, %zmm10, %zmm8
3531 ; AVX512BW-NEXT: vpshufb %zmm7, %zmm1, %zmm7
3532 ; AVX512BW-NEXT: vpshufb %zmm9, %zmm0, %zmm9
3533 ; AVX512BW-NEXT: vporq %zmm7, %zmm9, %zmm7
3534 ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm6, %zmm7
3535 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [128,128,128,128,3,7,11,15,u,u,u,u,u,u,u,u,128,128,128,128,19,23,27,31,u,u,u,u,u,u,u,u,128,128,128,128,35,39,43,47,u,u,u,u,u,u,u,u,128,128,128,128,51,55,59,63,u,u,u,u,u,u,u,u]
3536 ; AVX512BW-NEXT: vpshufb %zmm8, %zmm3, %zmm3
3537 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = [3,7,11,15,128,128,128,128,u,u,u,u,u,u,u,u,19,23,27,31,128,128,128,128,u,u,u,u,u,u,u,u,35,39,43,47,128,128,128,128,u,u,u,u,u,u,u,u,51,55,59,63,128,128,128,128,u,u,u,u,u,u,u,u]
3538 ; AVX512BW-NEXT: vpshufb %zmm9, %zmm2, %zmm2
3539 ; AVX512BW-NEXT: vporq %zmm3, %zmm2, %zmm2
3540 ; AVX512BW-NEXT: vpshufb %zmm8, %zmm1, %zmm1
3541 ; AVX512BW-NEXT: vpshufb %zmm9, %zmm0, %zmm0
3542 ; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
3543 ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
3544 ; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rsi)
3545 ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rdx)
3546 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rcx)
3547 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%r8)
3548 ; AVX512BW-NEXT: vzeroupper
3549 ; AVX512BW-NEXT: retq
3550 ;
3551 ; AVX512BW-FCP-LABEL: load_i8_stride4_vf64:
3552 ; AVX512BW-FCP: # %bb.0:
3553 ; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
3554 ; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
3555 ; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
3556 ; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
3557 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,0,4,8,12,u,u,u,u,u,u,u,u,128,128,128,128,16,20,24,28,u,u,u,u,u,u,u,u,128,128,128,128,32,36,40,44,u,u,u,u,u,u,u,u,128,128,128,128,48,52,56,60,u,u,u,u,u,u,u,u]
3558 ; AVX512BW-FCP-NEXT: vpshufb %zmm4, %zmm3, %zmm5
3559 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,4,8,12,128,128,128,128,u,u,u,u,u,u,u,u,16,20,24,28,128,128,128,128,u,u,u,u,u,u,u,u,32,36,40,44,128,128,128,128,u,u,u,u,u,u,u,u,48,52,56,60,128,128,128,128,u,u,u,u,u,u,u,u]
3560 ; AVX512BW-FCP-NEXT: vpshufb %zmm6, %zmm2, %zmm7
3561 ; AVX512BW-FCP-NEXT: vporq %zmm5, %zmm7, %zmm5
3562 ; AVX512BW-FCP-NEXT: vpshufb %zmm4, %zmm1, %zmm4
3563 ; AVX512BW-FCP-NEXT: vpshufb %zmm6, %zmm0, %zmm6
3564 ; AVX512BW-FCP-NEXT: vporq %zmm4, %zmm6, %zmm4
3565 ; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,4,8,12,1,5,9,13,16,20,24,28,17,21,25,29]
3566 ; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm6, %zmm4
3567 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [128,128,128,128,1,5,9,13,u,u,u,u,u,u,u,u,128,128,128,128,17,21,25,29,u,u,u,u,u,u,u,u,128,128,128,128,33,37,41,45,u,u,u,u,u,u,u,u,128,128,128,128,49,53,57,61,u,u,u,u,u,u,u,u]
3568 ; AVX512BW-FCP-NEXT: vpshufb %zmm5, %zmm3, %zmm7
3569 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [1,5,9,13,128,128,128,128,u,u,u,u,u,u,u,u,17,21,25,29,128,128,128,128,u,u,u,u,u,u,u,u,33,37,41,45,128,128,128,128,u,u,u,u,u,u,u,u,49,53,57,61,128,128,128,128,u,u,u,u,u,u,u,u]
3570 ; AVX512BW-FCP-NEXT: vpshufb %zmm8, %zmm2, %zmm9
3571 ; AVX512BW-FCP-NEXT: vporq %zmm7, %zmm9, %zmm7
3572 ; AVX512BW-FCP-NEXT: vpshufb %zmm5, %zmm1, %zmm5
3573 ; AVX512BW-FCP-NEXT: vpshufb %zmm8, %zmm0, %zmm8
3574 ; AVX512BW-FCP-NEXT: vporq %zmm5, %zmm8, %zmm5
3575 ; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm6, %zmm5
3576 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm7 = [128,128,128,128,2,6,10,14,u,u,u,u,u,u,u,u,128,128,128,128,18,22,26,30,u,u,u,u,u,u,u,u,128,128,128,128,34,38,42,46,u,u,u,u,u,u,u,u,128,128,128,128,50,54,58,62,u,u,u,u,u,u,u,u]
3577 ; AVX512BW-FCP-NEXT: vpshufb %zmm7, %zmm3, %zmm8
3578 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [2,6,10,14,128,128,128,128,u,u,u,u,u,u,u,u,18,22,26,30,128,128,128,128,u,u,u,u,u,u,u,u,34,38,42,46,128,128,128,128,u,u,u,u,u,u,u,u,50,54,58,62,128,128,128,128,u,u,u,u,u,u,u,u]
3579 ; AVX512BW-FCP-NEXT: vpshufb %zmm9, %zmm2, %zmm10
3580 ; AVX512BW-FCP-NEXT: vporq %zmm8, %zmm10, %zmm8
3581 ; AVX512BW-FCP-NEXT: vpshufb %zmm7, %zmm1, %zmm7
3582 ; AVX512BW-FCP-NEXT: vpshufb %zmm9, %zmm0, %zmm9
3583 ; AVX512BW-FCP-NEXT: vporq %zmm7, %zmm9, %zmm7
3584 ; AVX512BW-FCP-NEXT: vpermt2d %zmm8, %zmm6, %zmm7
3585 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [128,128,128,128,3,7,11,15,u,u,u,u,u,u,u,u,128,128,128,128,19,23,27,31,u,u,u,u,u,u,u,u,128,128,128,128,35,39,43,47,u,u,u,u,u,u,u,u,128,128,128,128,51,55,59,63,u,u,u,u,u,u,u,u]
3586 ; AVX512BW-FCP-NEXT: vpshufb %zmm8, %zmm3, %zmm3
3587 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [3,7,11,15,128,128,128,128,u,u,u,u,u,u,u,u,19,23,27,31,128,128,128,128,u,u,u,u,u,u,u,u,35,39,43,47,128,128,128,128,u,u,u,u,u,u,u,u,51,55,59,63,128,128,128,128,u,u,u,u,u,u,u,u]
3588 ; AVX512BW-FCP-NEXT: vpshufb %zmm9, %zmm2, %zmm2
3589 ; AVX512BW-FCP-NEXT: vporq %zmm3, %zmm2, %zmm2
3590 ; AVX512BW-FCP-NEXT: vpshufb %zmm8, %zmm1, %zmm1
3591 ; AVX512BW-FCP-NEXT: vpshufb %zmm9, %zmm0, %zmm0
3592 ; AVX512BW-FCP-NEXT: vporq %zmm1, %zmm0, %zmm0
3593 ; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
3594 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, (%rsi)
3595 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, (%rdx)
3596 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, (%rcx)
3597 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, (%r8)
3598 ; AVX512BW-FCP-NEXT: vzeroupper
3599 ; AVX512BW-FCP-NEXT: retq
3600 ;
3601 ; AVX512DQ-BW-LABEL: load_i8_stride4_vf64:
3602 ; AVX512DQ-BW: # %bb.0:
3603 ; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
3604 ; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm1
3605 ; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm2
3606 ; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %zmm3
3607 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,0,4,8,12,u,u,u,u,u,u,u,u,128,128,128,128,16,20,24,28,u,u,u,u,u,u,u,u,128,128,128,128,32,36,40,44,u,u,u,u,u,u,u,u,128,128,128,128,48,52,56,60,u,u,u,u,u,u,u,u]
3608 ; AVX512DQ-BW-NEXT: vpshufb %zmm4, %zmm3, %zmm5
3609 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,4,8,12,128,128,128,128,u,u,u,u,u,u,u,u,16,20,24,28,128,128,128,128,u,u,u,u,u,u,u,u,32,36,40,44,128,128,128,128,u,u,u,u,u,u,u,u,48,52,56,60,128,128,128,128,u,u,u,u,u,u,u,u]
3610 ; AVX512DQ-BW-NEXT: vpshufb %zmm6, %zmm2, %zmm7
3611 ; AVX512DQ-BW-NEXT: vporq %zmm5, %zmm7, %zmm5
3612 ; AVX512DQ-BW-NEXT: vpshufb %zmm4, %zmm1, %zmm4
3613 ; AVX512DQ-BW-NEXT: vpshufb %zmm6, %zmm0, %zmm6
3614 ; AVX512DQ-BW-NEXT: vporq %zmm4, %zmm6, %zmm4
3615 ; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,4,8,12,1,5,9,13,16,20,24,28,17,21,25,29]
3616 ; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm6, %zmm4
3617 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = [128,128,128,128,1,5,9,13,u,u,u,u,u,u,u,u,128,128,128,128,17,21,25,29,u,u,u,u,u,u,u,u,128,128,128,128,33,37,41,45,u,u,u,u,u,u,u,u,128,128,128,128,49,53,57,61,u,u,u,u,u,u,u,u]
3618 ; AVX512DQ-BW-NEXT: vpshufb %zmm5, %zmm3, %zmm7
3619 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [1,5,9,13,128,128,128,128,u,u,u,u,u,u,u,u,17,21,25,29,128,128,128,128,u,u,u,u,u,u,u,u,33,37,41,45,128,128,128,128,u,u,u,u,u,u,u,u,49,53,57,61,128,128,128,128,u,u,u,u,u,u,u,u]
3620 ; AVX512DQ-BW-NEXT: vpshufb %zmm8, %zmm2, %zmm9
3621 ; AVX512DQ-BW-NEXT: vporq %zmm7, %zmm9, %zmm7
3622 ; AVX512DQ-BW-NEXT: vpshufb %zmm5, %zmm1, %zmm5
3623 ; AVX512DQ-BW-NEXT: vpshufb %zmm8, %zmm0, %zmm8
3624 ; AVX512DQ-BW-NEXT: vporq %zmm5, %zmm8, %zmm5
3625 ; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm6, %zmm5
3626 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [128,128,128,128,2,6,10,14,u,u,u,u,u,u,u,u,128,128,128,128,18,22,26,30,u,u,u,u,u,u,u,u,128,128,128,128,34,38,42,46,u,u,u,u,u,u,u,u,128,128,128,128,50,54,58,62,u,u,u,u,u,u,u,u]
3627 ; AVX512DQ-BW-NEXT: vpshufb %zmm7, %zmm3, %zmm8
3628 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = [2,6,10,14,128,128,128,128,u,u,u,u,u,u,u,u,18,22,26,30,128,128,128,128,u,u,u,u,u,u,u,u,34,38,42,46,128,128,128,128,u,u,u,u,u,u,u,u,50,54,58,62,128,128,128,128,u,u,u,u,u,u,u,u]
3629 ; AVX512DQ-BW-NEXT: vpshufb %zmm9, %zmm2, %zmm10
3630 ; AVX512DQ-BW-NEXT: vporq %zmm8, %zmm10, %zmm8
3631 ; AVX512DQ-BW-NEXT: vpshufb %zmm7, %zmm1, %zmm7
3632 ; AVX512DQ-BW-NEXT: vpshufb %zmm9, %zmm0, %zmm9
3633 ; AVX512DQ-BW-NEXT: vporq %zmm7, %zmm9, %zmm7
3634 ; AVX512DQ-BW-NEXT: vpermt2d %zmm8, %zmm6, %zmm7
3635 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [128,128,128,128,3,7,11,15,u,u,u,u,u,u,u,u,128,128,128,128,19,23,27,31,u,u,u,u,u,u,u,u,128,128,128,128,35,39,43,47,u,u,u,u,u,u,u,u,128,128,128,128,51,55,59,63,u,u,u,u,u,u,u,u]
3636 ; AVX512DQ-BW-NEXT: vpshufb %zmm8, %zmm3, %zmm3
3637 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = [3,7,11,15,128,128,128,128,u,u,u,u,u,u,u,u,19,23,27,31,128,128,128,128,u,u,u,u,u,u,u,u,35,39,43,47,128,128,128,128,u,u,u,u,u,u,u,u,51,55,59,63,128,128,128,128,u,u,u,u,u,u,u,u]
3638 ; AVX512DQ-BW-NEXT: vpshufb %zmm9, %zmm2, %zmm2
3639 ; AVX512DQ-BW-NEXT: vporq %zmm3, %zmm2, %zmm2
3640 ; AVX512DQ-BW-NEXT: vpshufb %zmm8, %zmm1, %zmm1
3641 ; AVX512DQ-BW-NEXT: vpshufb %zmm9, %zmm0, %zmm0
3642 ; AVX512DQ-BW-NEXT: vporq %zmm1, %zmm0, %zmm0
3643 ; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
3644 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, (%rsi)
3645 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, (%rdx)
3646 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, (%rcx)
3647 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, (%r8)
3648 ; AVX512DQ-BW-NEXT: vzeroupper
3649 ; AVX512DQ-BW-NEXT: retq
3650 ;
3651 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride4_vf64:
3652 ; AVX512DQ-BW-FCP: # %bb.0:
3653 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
3654 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
3655 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
3656 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
3657 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,0,4,8,12,u,u,u,u,u,u,u,u,128,128,128,128,16,20,24,28,u,u,u,u,u,u,u,u,128,128,128,128,32,36,40,44,u,u,u,u,u,u,u,u,128,128,128,128,48,52,56,60,u,u,u,u,u,u,u,u]
3658 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm4, %zmm3, %zmm5
3659 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,4,8,12,128,128,128,128,u,u,u,u,u,u,u,u,16,20,24,28,128,128,128,128,u,u,u,u,u,u,u,u,32,36,40,44,128,128,128,128,u,u,u,u,u,u,u,u,48,52,56,60,128,128,128,128,u,u,u,u,u,u,u,u]
3660 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm6, %zmm2, %zmm7
3661 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm5, %zmm7, %zmm5
3662 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm4, %zmm1, %zmm4
3663 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm6, %zmm0, %zmm6
3664 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm4, %zmm6, %zmm4
3665 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,4,8,12,1,5,9,13,16,20,24,28,17,21,25,29]
3666 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm6, %zmm4
3667 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [128,128,128,128,1,5,9,13,u,u,u,u,u,u,u,u,128,128,128,128,17,21,25,29,u,u,u,u,u,u,u,u,128,128,128,128,33,37,41,45,u,u,u,u,u,u,u,u,128,128,128,128,49,53,57,61,u,u,u,u,u,u,u,u]
3668 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm5, %zmm3, %zmm7
3669 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [1,5,9,13,128,128,128,128,u,u,u,u,u,u,u,u,17,21,25,29,128,128,128,128,u,u,u,u,u,u,u,u,33,37,41,45,128,128,128,128,u,u,u,u,u,u,u,u,49,53,57,61,128,128,128,128,u,u,u,u,u,u,u,u]
3670 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm8, %zmm2, %zmm9
3671 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm7, %zmm9, %zmm7
3672 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm5, %zmm1, %zmm5
3673 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm8, %zmm0, %zmm8
3674 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm5, %zmm8, %zmm5
3675 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm6, %zmm5
3676 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm7 = [128,128,128,128,2,6,10,14,u,u,u,u,u,u,u,u,128,128,128,128,18,22,26,30,u,u,u,u,u,u,u,u,128,128,128,128,34,38,42,46,u,u,u,u,u,u,u,u,128,128,128,128,50,54,58,62,u,u,u,u,u,u,u,u]
3677 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm7, %zmm3, %zmm8
3678 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [2,6,10,14,128,128,128,128,u,u,u,u,u,u,u,u,18,22,26,30,128,128,128,128,u,u,u,u,u,u,u,u,34,38,42,46,128,128,128,128,u,u,u,u,u,u,u,u,50,54,58,62,128,128,128,128,u,u,u,u,u,u,u,u]
3679 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm9, %zmm2, %zmm10
3680 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm8, %zmm10, %zmm8
3681 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm7, %zmm1, %zmm7
3682 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm9, %zmm0, %zmm9
3683 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm7, %zmm9, %zmm7
3684 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm8, %zmm6, %zmm7
3685 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [128,128,128,128,3,7,11,15,u,u,u,u,u,u,u,u,128,128,128,128,19,23,27,31,u,u,u,u,u,u,u,u,128,128,128,128,35,39,43,47,u,u,u,u,u,u,u,u,128,128,128,128,51,55,59,63,u,u,u,u,u,u,u,u]
3686 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm8, %zmm3, %zmm3
3687 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [3,7,11,15,128,128,128,128,u,u,u,u,u,u,u,u,19,23,27,31,128,128,128,128,u,u,u,u,u,u,u,u,35,39,43,47,128,128,128,128,u,u,u,u,u,u,u,u,51,55,59,63,128,128,128,128,u,u,u,u,u,u,u,u]
3688 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm9, %zmm2, %zmm2
3689 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm3, %zmm2, %zmm2
3690 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm8, %zmm1, %zmm1
3691 ; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm9, %zmm0, %zmm0
3692 ; AVX512DQ-BW-FCP-NEXT: vporq %zmm1, %zmm0, %zmm0
3693 ; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
3694 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, (%rsi)
3695 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, (%rdx)
3696 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, (%rcx)
3697 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, (%r8)
3698 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
3699 ; AVX512DQ-BW-FCP-NEXT: retq
3700 %wide.vec = load <256 x i8>, ptr %in.vec, align 64
3701 %strided.vec0 = shufflevector <256 x i8> %wide.vec, <256 x i8> poison, <64 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124, i32 128, i32 132, i32 136, i32 140, i32 144, i32 148, i32 152, i32 156, i32 160, i32 164, i32 168, i32 172, i32 176, i32 180, i32 184, i32 188, i32 192, i32 196, i32 200, i32 204, i32 208, i32 212, i32 216, i32 220, i32 224, i32 228, i32 232, i32 236, i32 240, i32 244, i32 248, i32 252>
3702 %strided.vec1 = shufflevector <256 x i8> %wide.vec, <256 x i8> poison, <64 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61, i32 65, i32 69, i32 73, i32 77, i32 81, i32 85, i32 89, i32 93, i32 97, i32 101, i32 105, i32 109, i32 113, i32 117, i32 121, i32 125, i32 129, i32 133, i32 137, i32 141, i32 145, i32 149, i32 153, i32 157, i32 161, i32 165, i32 169, i32 173, i32 177, i32 181, i32 185, i32 189, i32 193, i32 197, i32 201, i32 205, i32 209, i32 213, i32 217, i32 221, i32 225, i32 229, i32 233, i32 237, i32 241, i32 245, i32 249, i32 253>
3703 %strided.vec2 = shufflevector <256 x i8> %wide.vec, <256 x i8> poison, <64 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62, i32 66, i32 70, i32 74, i32 78, i32 82, i32 86, i32 90, i32 94, i32 98, i32 102, i32 106, i32 110, i32 114, i32 118, i32 122, i32 126, i32 130, i32 134, i32 138, i32 142, i32 146, i32 150, i32 154, i32 158, i32 162, i32 166, i32 170, i32 174, i32 178, i32 182, i32 186, i32 190, i32 194, i32 198, i32 202, i32 206, i32 210, i32 214, i32 218, i32 222, i32 226, i32 230, i32 234, i32 238, i32 242, i32 246, i32 250, i32 254>
3704 %strided.vec3 = shufflevector <256 x i8> %wide.vec, <256 x i8> poison, <64 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63, i32 67, i32 71, i32 75, i32 79, i32 83, i32 87, i32 91, i32 95, i32 99, i32 103, i32 107, i32 111, i32 115, i32 119, i32 123, i32 127, i32 131, i32 135, i32 139, i32 143, i32 147, i32 151, i32 155, i32 159, i32 163, i32 167, i32 171, i32 175, i32 179, i32 183, i32 187, i32 191, i32 195, i32 199, i32 203, i32 207, i32 211, i32 215, i32 219, i32 223, i32 227, i32 231, i32 235, i32 239, i32 243, i32 247, i32 251, i32 255>
3705 store <64 x i8> %strided.vec0, ptr %out.vec0, align 64
3706 store <64 x i8> %strided.vec1, ptr %out.vec1, align 64
3707 store <64 x i8> %strided.vec2, ptr %out.vec2, align 64
3708 store <64 x i8> %strided.vec3, ptr %out.vec3, align 64