1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FP
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FCP
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512-FCP
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX512DQ
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-FCP
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512BW
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW-FCP
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512DQ-BW
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-BW-FCP
16 ; These patterns are produced by LoopVectorizer for interleaved loads.
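;
; For reference, a rough C-level sketch (not part of the test input; the
; function and variable names are illustrative only) of the kind of scalar
; loop whose interleaved-access vectorization produces the wide load plus
; strided shufflevector IR used in the functions below, reading six byte
; fields at stride 6:
;
;   #include <stdint.h>
;   void deinterleave6(const uint8_t *in, uint8_t *o0, uint8_t *o1,
;                      uint8_t *o2, uint8_t *o3, uint8_t *o4, uint8_t *o5,
;                      int n) {
;     for (int i = 0; i < n; i++) {
;       o0[i] = in[6 * i + 0];
;       o1[i] = in[6 * i + 1];
;       o2[i] = in[6 * i + 2];
;       o3[i] = in[6 * i + 3];
;       o4[i] = in[6 * i + 4];
;       o5[i] = in[6 * i + 5];
;     }
;   }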
18 define void @load_i8_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
19 ; SSE-LABEL: load_i8_stride6_vf2:
21 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
22 ; SSE-NEXT: movdqa (%rdi), %xmm1
23 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
24 ; SSE-NEXT: pand %xmm1, %xmm3
25 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,3,2,3,4,5,6,7]
26 ; SSE-NEXT: packuswb %xmm2, %xmm2
27 ; SSE-NEXT: pxor %xmm4, %xmm4
28 ; SSE-NEXT: movdqa %xmm1, %xmm0
29 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
30 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,3,2,3]
31 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
32 ; SSE-NEXT: packuswb %xmm5, %xmm5
33 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,2,2,3]
34 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,2,2,3,4,5,6,7]
35 ; SSE-NEXT: packuswb %xmm6, %xmm6
36 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
37 ; SSE-NEXT: movdqa %xmm0, %xmm4
38 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
39 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[3,1,2,3]
40 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,3,4,5,6,7]
41 ; SSE-NEXT: packuswb %xmm4, %xmm4
42 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
43 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7]
44 ; SSE-NEXT: packuswb %xmm3, %xmm3
45 ; SSE-NEXT: psrlq $48, %xmm1
46 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
47 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
48 ; SSE-NEXT: packuswb %xmm0, %xmm0
49 ; SSE-NEXT: movd %xmm2, %edi
50 ; SSE-NEXT: movw %di, (%rsi)
51 ; SSE-NEXT: movd %xmm5, %esi
52 ; SSE-NEXT: movw %si, (%rdx)
53 ; SSE-NEXT: movd %xmm6, %edx
54 ; SSE-NEXT: movw %dx, (%rcx)
55 ; SSE-NEXT: movd %xmm4, %ecx
56 ; SSE-NEXT: movw %cx, (%r8)
57 ; SSE-NEXT: movd %xmm3, %ecx
58 ; SSE-NEXT: movw %cx, (%r9)
59 ; SSE-NEXT: movd %xmm0, %ecx
60 ; SSE-NEXT: movw %cx, (%rax)
63 ; AVX-LABEL: load_i8_stride6_vf2:
65 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
66 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
67 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
68 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
69 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
70 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
71 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
72 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
73 ; AVX-NEXT: vpextrw $0, %xmm1, (%rsi)
74 ; AVX-NEXT: vpextrw $0, %xmm2, (%rdx)
75 ; AVX-NEXT: vpextrw $0, %xmm3, (%rcx)
76 ; AVX-NEXT: vpextrw $0, %xmm4, (%r8)
77 ; AVX-NEXT: vpextrw $0, %xmm5, (%r9)
78 ; AVX-NEXT: vpextrw $0, %xmm0, (%rax)
81 ; AVX2-LABEL: load_i8_stride6_vf2:
83 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
84 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
85 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
86 ; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
87 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
88 ; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
89 ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
90 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
91 ; AVX2-NEXT: vpextrw $0, %xmm1, (%rsi)
92 ; AVX2-NEXT: vpextrw $0, %xmm2, (%rdx)
93 ; AVX2-NEXT: vpextrw $0, %xmm3, (%rcx)
94 ; AVX2-NEXT: vpextrw $0, %xmm4, (%r8)
95 ; AVX2-NEXT: vpextrw $0, %xmm5, (%r9)
96 ; AVX2-NEXT: vpextrw $0, %xmm0, (%rax)
99 ; AVX2-FP-LABEL: load_i8_stride6_vf2:
101 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
102 ; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm0
103 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
104 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
105 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
106 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
107 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
108 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
109 ; AVX2-FP-NEXT: vpextrw $0, %xmm1, (%rsi)
110 ; AVX2-FP-NEXT: vpextrw $0, %xmm2, (%rdx)
111 ; AVX2-FP-NEXT: vpextrw $0, %xmm3, (%rcx)
112 ; AVX2-FP-NEXT: vpextrw $0, %xmm4, (%r8)
113 ; AVX2-FP-NEXT: vpextrw $0, %xmm5, (%r9)
114 ; AVX2-FP-NEXT: vpextrw $0, %xmm0, (%rax)
117 ; AVX2-FCP-LABEL: load_i8_stride6_vf2:
119 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
120 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm0
121 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
122 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
123 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
124 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
125 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
126 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
127 ; AVX2-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
128 ; AVX2-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
129 ; AVX2-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
130 ; AVX2-FCP-NEXT: vpextrw $0, %xmm4, (%r8)
131 ; AVX2-FCP-NEXT: vpextrw $0, %xmm5, (%r9)
132 ; AVX2-FCP-NEXT: vpextrw $0, %xmm0, (%rax)
133 ; AVX2-FCP-NEXT: retq
135 ; AVX512-LABEL: load_i8_stride6_vf2:
137 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
138 ; AVX512-NEXT: vmovdqa (%rdi), %xmm0
139 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
140 ; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
141 ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
142 ; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
143 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
144 ; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
145 ; AVX512-NEXT: vpextrw $0, %xmm1, (%rsi)
146 ; AVX512-NEXT: vpextrw $0, %xmm2, (%rdx)
147 ; AVX512-NEXT: vpextrw $0, %xmm3, (%rcx)
148 ; AVX512-NEXT: vpextrw $0, %xmm4, (%r8)
149 ; AVX512-NEXT: vpextrw $0, %xmm5, (%r9)
150 ; AVX512-NEXT: vpextrw $0, %xmm0, (%rax)
153 ; AVX512-FCP-LABEL: load_i8_stride6_vf2:
154 ; AVX512-FCP: # %bb.0:
155 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
156 ; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
157 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
158 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
159 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
160 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
161 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
162 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
163 ; AVX512-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
164 ; AVX512-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
165 ; AVX512-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
166 ; AVX512-FCP-NEXT: vpextrw $0, %xmm4, (%r8)
167 ; AVX512-FCP-NEXT: vpextrw $0, %xmm5, (%r9)
168 ; AVX512-FCP-NEXT: vpextrw $0, %xmm0, (%rax)
169 ; AVX512-FCP-NEXT: retq
171 ; AVX512DQ-LABEL: load_i8_stride6_vf2:
173 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
174 ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
175 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
176 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
177 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
178 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
179 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
180 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
181 ; AVX512DQ-NEXT: vpextrw $0, %xmm1, (%rsi)
182 ; AVX512DQ-NEXT: vpextrw $0, %xmm2, (%rdx)
183 ; AVX512DQ-NEXT: vpextrw $0, %xmm3, (%rcx)
184 ; AVX512DQ-NEXT: vpextrw $0, %xmm4, (%r8)
185 ; AVX512DQ-NEXT: vpextrw $0, %xmm5, (%r9)
186 ; AVX512DQ-NEXT: vpextrw $0, %xmm0, (%rax)
187 ; AVX512DQ-NEXT: retq
189 ; AVX512DQ-FCP-LABEL: load_i8_stride6_vf2:
190 ; AVX512DQ-FCP: # %bb.0:
191 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
192 ; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
193 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
194 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
195 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
196 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
197 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
198 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
199 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
200 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
201 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
202 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm4, (%r8)
203 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm5, (%r9)
204 ; AVX512DQ-FCP-NEXT: vpextrw $0, %xmm0, (%rax)
205 ; AVX512DQ-FCP-NEXT: retq
207 ; AVX512BW-LABEL: load_i8_stride6_vf2:
209 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
210 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
211 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
212 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
213 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
214 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
215 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
216 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
217 ; AVX512BW-NEXT: vpextrw $0, %xmm1, (%rsi)
218 ; AVX512BW-NEXT: vpextrw $0, %xmm2, (%rdx)
219 ; AVX512BW-NEXT: vpextrw $0, %xmm3, (%rcx)
220 ; AVX512BW-NEXT: vpextrw $0, %xmm4, (%r8)
221 ; AVX512BW-NEXT: vpextrw $0, %xmm5, (%r9)
222 ; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rax)
223 ; AVX512BW-NEXT: retq
225 ; AVX512BW-FCP-LABEL: load_i8_stride6_vf2:
226 ; AVX512BW-FCP: # %bb.0:
227 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
228 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
229 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
230 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
231 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
232 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
233 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
234 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
235 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
236 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
237 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
238 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm4, (%r8)
239 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm5, (%r9)
240 ; AVX512BW-FCP-NEXT: vpextrw $0, %xmm0, (%rax)
241 ; AVX512BW-FCP-NEXT: retq
243 ; AVX512DQ-BW-LABEL: load_i8_stride6_vf2:
244 ; AVX512DQ-BW: # %bb.0:
245 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
246 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm0
247 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
248 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
249 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
250 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
251 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
252 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
253 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm1, (%rsi)
254 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm2, (%rdx)
255 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm3, (%rcx)
256 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm4, (%r8)
257 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm5, (%r9)
258 ; AVX512DQ-BW-NEXT: vpextrw $0, %xmm0, (%rax)
259 ; AVX512DQ-BW-NEXT: retq
261 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride6_vf2:
262 ; AVX512DQ-BW-FCP: # %bb.0:
263 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
264 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
265 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
266 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
267 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
268 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
269 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
270 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
271 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm1, (%rsi)
272 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm2, (%rdx)
273 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm3, (%rcx)
274 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm4, (%r8)
275 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm5, (%r9)
276 ; AVX512DQ-BW-FCP-NEXT: vpextrw $0, %xmm0, (%rax)
277 ; AVX512DQ-BW-FCP-NEXT: retq
278 %wide.vec = load <12 x i8>, ptr %in.vec, align 64
279 %strided.vec0 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 0, i32 6>
280 %strided.vec1 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 1, i32 7>
281 %strided.vec2 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 2, i32 8>
282 %strided.vec3 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 3, i32 9>
283 %strided.vec4 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 4, i32 10>
284 %strided.vec5 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 5, i32 11>
285 store <2 x i8> %strided.vec0, ptr %out.vec0, align 64
286 store <2 x i8> %strided.vec1, ptr %out.vec1, align 64
287 store <2 x i8> %strided.vec2, ptr %out.vec2, align 64
288 store <2 x i8> %strided.vec3, ptr %out.vec3, align 64
289 store <2 x i8> %strided.vec4, ptr %out.vec4, align 64
290   store <2 x i8> %strided.vec5, ptr %out.vec5, align 64
291   ret void
292 }
294 define void @load_i8_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
295 ; SSE-LABEL: load_i8_stride6_vf4:
297 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
298 ; SSE-NEXT: movdqa (%rdi), %xmm5
299 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
300 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,65535,65535]
301 ; SSE-NEXT: movdqa %xmm5, %xmm2
302 ; SSE-NEXT: pand %xmm0, %xmm2
303 ; SSE-NEXT: pandn %xmm1, %xmm0
304 ; SSE-NEXT: por %xmm2, %xmm0
305 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
306 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [16711935,16711935,16711935,16711935]
307 ; SSE-NEXT: pand %xmm2, %xmm0
308 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
309 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
310 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
311 ; SSE-NEXT: packuswb %xmm0, %xmm0
312 ; SSE-NEXT: pxor %xmm3, %xmm3
313 ; SSE-NEXT: movdqa %xmm5, %xmm7
314 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255]
315 ; SSE-NEXT: pandn %xmm1, %xmm4
316 ; SSE-NEXT: movdqa %xmm1, %xmm6
317 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm5[0,0]
318 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm5[2,3]
319 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,0,65535,65535,0,65535,65535]
320 ; SSE-NEXT: pand %xmm8, %xmm1
321 ; SSE-NEXT: pandn %xmm5, %xmm8
322 ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
323 ; SSE-NEXT: movdqa %xmm5, %xmm9
324 ; SSE-NEXT: psrld $16, %xmm9
325 ; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
326 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
327 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,7,6,7]
328 ; SSE-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
329 ; SSE-NEXT: packuswb %xmm7, %xmm7
330 ; SSE-NEXT: por %xmm7, %xmm4
331 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm6[2,1,2,3,4,5,6,7]
332 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,7]
333 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7
334 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
335 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,2,3,0,4,5,6,7]
336 ; SSE-NEXT: packuswb %xmm7, %xmm7
337 ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
338 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,2],xmm5[0,3]
339 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0,1,3]
340 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
341 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,7]
342 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
343 ; SSE-NEXT: packuswb %xmm6, %xmm6
344 ; SSE-NEXT: por %xmm1, %xmm8
345 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[3,1,2,0]
346 ; SSE-NEXT: pand %xmm2, %xmm1
347 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
348 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
349 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
350 ; SSE-NEXT: packuswb %xmm1, %xmm1
351 ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3],xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
352 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm8[0,0]
353 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm8[2,3]
354 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[3,1,2,3,4,5,6,7]
355 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7]
356 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
357 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
358 ; SSE-NEXT: packuswb %xmm2, %xmm2
359 ; SSE-NEXT: movd %xmm0, (%rsi)
360 ; SSE-NEXT: movd %xmm4, (%rdx)
361 ; SSE-NEXT: movd %xmm7, (%rcx)
362 ; SSE-NEXT: movd %xmm6, (%r8)
363 ; SSE-NEXT: movd %xmm1, (%r9)
364 ; SSE-NEXT: movd %xmm2, (%rax)
367 ; AVX-LABEL: load_i8_stride6_vf4:
369 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
370 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
371 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
372 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
373 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
374 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
375 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
376 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
377 ; AVX-NEXT: vpor %xmm3, %xmm4, %xmm3
378 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
379 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
380 ; AVX-NEXT: vpor %xmm4, %xmm5, %xmm4
381 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
382 ; AVX-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
383 ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5
384 ; AVX-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
385 ; AVX-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
386 ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
387 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
388 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
389 ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
390 ; AVX-NEXT: vmovd %xmm2, (%rsi)
391 ; AVX-NEXT: vmovd %xmm3, (%rdx)
392 ; AVX-NEXT: vmovd %xmm4, (%rcx)
393 ; AVX-NEXT: vmovd %xmm5, (%r8)
394 ; AVX-NEXT: vmovd %xmm6, (%r9)
395 ; AVX-NEXT: vmovd %xmm0, (%rax)
398 ; AVX2-LABEL: load_i8_stride6_vf4:
400 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
401 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
402 ; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1
403 ; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
404 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
405 ; AVX2-NEXT: vpor %xmm2, %xmm3, %xmm2
406 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
407 ; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
408 ; AVX2-NEXT: vpor %xmm3, %xmm4, %xmm3
409 ; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
410 ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
411 ; AVX2-NEXT: vpor %xmm4, %xmm5, %xmm4
412 ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
413 ; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
414 ; AVX2-NEXT: vpor %xmm5, %xmm6, %xmm5
415 ; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
416 ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
417 ; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
418 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
419 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
420 ; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
421 ; AVX2-NEXT: vmovd %xmm2, (%rsi)
422 ; AVX2-NEXT: vmovd %xmm3, (%rdx)
423 ; AVX2-NEXT: vmovd %xmm4, (%rcx)
424 ; AVX2-NEXT: vmovd %xmm5, (%r8)
425 ; AVX2-NEXT: vmovd %xmm6, (%r9)
426 ; AVX2-NEXT: vmovd %xmm0, (%rax)
429 ; AVX2-FP-LABEL: load_i8_stride6_vf4:
431 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
432 ; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm0
433 ; AVX2-FP-NEXT: vmovdqa 16(%rdi), %xmm1
434 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
435 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
436 ; AVX2-FP-NEXT: vpor %xmm2, %xmm3, %xmm2
437 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
438 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
439 ; AVX2-FP-NEXT: vpor %xmm3, %xmm4, %xmm3
440 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
441 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
442 ; AVX2-FP-NEXT: vpor %xmm4, %xmm5, %xmm4
443 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
444 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
445 ; AVX2-FP-NEXT: vpor %xmm5, %xmm6, %xmm5
446 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
447 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
448 ; AVX2-FP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
449 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
450 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
451 ; AVX2-FP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
452 ; AVX2-FP-NEXT: vmovd %xmm2, (%rsi)
453 ; AVX2-FP-NEXT: vmovd %xmm3, (%rdx)
454 ; AVX2-FP-NEXT: vmovd %xmm4, (%rcx)
455 ; AVX2-FP-NEXT: vmovd %xmm5, (%r8)
456 ; AVX2-FP-NEXT: vmovd %xmm6, (%r9)
457 ; AVX2-FP-NEXT: vmovd %xmm0, (%rax)
460 ; AVX2-FCP-LABEL: load_i8_stride6_vf4:
462 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
463 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm0
464 ; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
465 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
466 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
467 ; AVX2-FCP-NEXT: vpor %xmm2, %xmm3, %xmm2
468 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
469 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
470 ; AVX2-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
471 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
472 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
473 ; AVX2-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
474 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
475 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
476 ; AVX2-FCP-NEXT: vpor %xmm5, %xmm6, %xmm5
477 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
478 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
479 ; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
480 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
481 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
482 ; AVX2-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
483 ; AVX2-FCP-NEXT: vmovd %xmm2, (%rsi)
484 ; AVX2-FCP-NEXT: vmovd %xmm3, (%rdx)
485 ; AVX2-FCP-NEXT: vmovd %xmm4, (%rcx)
486 ; AVX2-FCP-NEXT: vmovd %xmm5, (%r8)
487 ; AVX2-FCP-NEXT: vmovd %xmm6, (%r9)
488 ; AVX2-FCP-NEXT: vmovd %xmm0, (%rax)
489 ; AVX2-FCP-NEXT: retq
491 ; AVX512-LABEL: load_i8_stride6_vf4:
493 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
494 ; AVX512-NEXT: vmovdqa (%rdi), %xmm0
495 ; AVX512-NEXT: vmovdqa 16(%rdi), %xmm1
496 ; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
497 ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
498 ; AVX512-NEXT: vpor %xmm2, %xmm3, %xmm2
499 ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
500 ; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
501 ; AVX512-NEXT: vpor %xmm3, %xmm4, %xmm3
502 ; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
503 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
504 ; AVX512-NEXT: vpor %xmm4, %xmm5, %xmm4
505 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
506 ; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
507 ; AVX512-NEXT: vpor %xmm5, %xmm6, %xmm5
508 ; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
509 ; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
510 ; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
511 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
512 ; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
513 ; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
514 ; AVX512-NEXT: vmovd %xmm2, (%rsi)
515 ; AVX512-NEXT: vmovd %xmm3, (%rdx)
516 ; AVX512-NEXT: vmovd %xmm4, (%rcx)
517 ; AVX512-NEXT: vmovd %xmm5, (%r8)
518 ; AVX512-NEXT: vmovd %xmm6, (%r9)
519 ; AVX512-NEXT: vmovd %xmm0, (%rax)
522 ; AVX512-FCP-LABEL: load_i8_stride6_vf4:
523 ; AVX512-FCP: # %bb.0:
524 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
525 ; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
526 ; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
527 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
528 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
529 ; AVX512-FCP-NEXT: vpor %xmm2, %xmm3, %xmm2
530 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
531 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
532 ; AVX512-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
533 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
534 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
535 ; AVX512-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
536 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
537 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
538 ; AVX512-FCP-NEXT: vpor %xmm5, %xmm6, %xmm5
539 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
540 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
541 ; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
542 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
543 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
544 ; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
545 ; AVX512-FCP-NEXT: vmovd %xmm2, (%rsi)
546 ; AVX512-FCP-NEXT: vmovd %xmm3, (%rdx)
547 ; AVX512-FCP-NEXT: vmovd %xmm4, (%rcx)
548 ; AVX512-FCP-NEXT: vmovd %xmm5, (%r8)
549 ; AVX512-FCP-NEXT: vmovd %xmm6, (%r9)
550 ; AVX512-FCP-NEXT: vmovd %xmm0, (%rax)
551 ; AVX512-FCP-NEXT: retq
553 ; AVX512DQ-LABEL: load_i8_stride6_vf4:
555 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
556 ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
557 ; AVX512DQ-NEXT: vmovdqa 16(%rdi), %xmm1
558 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
559 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
560 ; AVX512DQ-NEXT: vpor %xmm2, %xmm3, %xmm2
561 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
562 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
563 ; AVX512DQ-NEXT: vpor %xmm3, %xmm4, %xmm3
564 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
565 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
566 ; AVX512DQ-NEXT: vpor %xmm4, %xmm5, %xmm4
567 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
568 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
569 ; AVX512DQ-NEXT: vpor %xmm5, %xmm6, %xmm5
570 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
571 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
572 ; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
573 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
574 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
575 ; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
576 ; AVX512DQ-NEXT: vmovd %xmm2, (%rsi)
577 ; AVX512DQ-NEXT: vmovd %xmm3, (%rdx)
578 ; AVX512DQ-NEXT: vmovd %xmm4, (%rcx)
579 ; AVX512DQ-NEXT: vmovd %xmm5, (%r8)
580 ; AVX512DQ-NEXT: vmovd %xmm6, (%r9)
581 ; AVX512DQ-NEXT: vmovd %xmm0, (%rax)
582 ; AVX512DQ-NEXT: retq
584 ; AVX512DQ-FCP-LABEL: load_i8_stride6_vf4:
585 ; AVX512DQ-FCP: # %bb.0:
586 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
587 ; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
588 ; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
589 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
590 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
591 ; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm3, %xmm2
592 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
593 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
594 ; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
595 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
596 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
597 ; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
598 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
599 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
600 ; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm6, %xmm5
601 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
602 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
603 ; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
604 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
605 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
606 ; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
607 ; AVX512DQ-FCP-NEXT: vmovd %xmm2, (%rsi)
608 ; AVX512DQ-FCP-NEXT: vmovd %xmm3, (%rdx)
609 ; AVX512DQ-FCP-NEXT: vmovd %xmm4, (%rcx)
610 ; AVX512DQ-FCP-NEXT: vmovd %xmm5, (%r8)
611 ; AVX512DQ-FCP-NEXT: vmovd %xmm6, (%r9)
612 ; AVX512DQ-FCP-NEXT: vmovd %xmm0, (%rax)
613 ; AVX512DQ-FCP-NEXT: retq
615 ; AVX512BW-LABEL: load_i8_stride6_vf4:
617 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
618 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
619 ; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm1
620 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
621 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
622 ; AVX512BW-NEXT: vpor %xmm2, %xmm3, %xmm2
623 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
624 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
625 ; AVX512BW-NEXT: vpor %xmm3, %xmm4, %xmm3
626 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
627 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
628 ; AVX512BW-NEXT: vpor %xmm4, %xmm5, %xmm4
629 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
630 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
631 ; AVX512BW-NEXT: vpor %xmm5, %xmm6, %xmm5
632 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
633 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
634 ; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
635 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
636 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
637 ; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
638 ; AVX512BW-NEXT: vmovd %xmm2, (%rsi)
639 ; AVX512BW-NEXT: vmovd %xmm3, (%rdx)
640 ; AVX512BW-NEXT: vmovd %xmm4, (%rcx)
641 ; AVX512BW-NEXT: vmovd %xmm5, (%r8)
642 ; AVX512BW-NEXT: vmovd %xmm6, (%r9)
643 ; AVX512BW-NEXT: vmovd %xmm0, (%rax)
644 ; AVX512BW-NEXT: retq
646 ; AVX512BW-FCP-LABEL: load_i8_stride6_vf4:
647 ; AVX512BW-FCP: # %bb.0:
648 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
649 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
650 ; AVX512BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
651 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
652 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
653 ; AVX512BW-FCP-NEXT: vpor %xmm2, %xmm3, %xmm2
654 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
655 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
656 ; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
657 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
658 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
659 ; AVX512BW-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
660 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
661 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
662 ; AVX512BW-FCP-NEXT: vpor %xmm5, %xmm6, %xmm5
663 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
664 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
665 ; AVX512BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
666 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
667 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
668 ; AVX512BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
669 ; AVX512BW-FCP-NEXT: vmovd %xmm2, (%rsi)
670 ; AVX512BW-FCP-NEXT: vmovd %xmm3, (%rdx)
671 ; AVX512BW-FCP-NEXT: vmovd %xmm4, (%rcx)
672 ; AVX512BW-FCP-NEXT: vmovd %xmm5, (%r8)
673 ; AVX512BW-FCP-NEXT: vmovd %xmm6, (%r9)
674 ; AVX512BW-FCP-NEXT: vmovd %xmm0, (%rax)
675 ; AVX512BW-FCP-NEXT: retq
677 ; AVX512DQ-BW-LABEL: load_i8_stride6_vf4:
678 ; AVX512DQ-BW: # %bb.0:
679 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
680 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm0
681 ; AVX512DQ-BW-NEXT: vmovdqa 16(%rdi), %xmm1
682 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
683 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
684 ; AVX512DQ-BW-NEXT: vpor %xmm2, %xmm3, %xmm2
685 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
686 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
687 ; AVX512DQ-BW-NEXT: vpor %xmm3, %xmm4, %xmm3
688 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
689 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
690 ; AVX512DQ-BW-NEXT: vpor %xmm4, %xmm5, %xmm4
691 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
692 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
693 ; AVX512DQ-BW-NEXT: vpor %xmm5, %xmm6, %xmm5
694 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
695 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
696 ; AVX512DQ-BW-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
697 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
698 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
699 ; AVX512DQ-BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
700 ; AVX512DQ-BW-NEXT: vmovd %xmm2, (%rsi)
701 ; AVX512DQ-BW-NEXT: vmovd %xmm3, (%rdx)
702 ; AVX512DQ-BW-NEXT: vmovd %xmm4, (%rcx)
703 ; AVX512DQ-BW-NEXT: vmovd %xmm5, (%r8)
704 ; AVX512DQ-BW-NEXT: vmovd %xmm6, (%r9)
705 ; AVX512DQ-BW-NEXT: vmovd %xmm0, (%rax)
706 ; AVX512DQ-BW-NEXT: retq
708 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride6_vf4:
709 ; AVX512DQ-BW-FCP: # %bb.0:
710 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
711 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm0
712 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
713 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u]
714 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
715 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm2, %xmm3, %xmm2
716 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u]
717 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
718 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
719 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u]
720 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
721 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
722 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u]
723 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
724 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm5, %xmm6, %xmm5
725 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
726 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
727 ; AVX512DQ-BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
728 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
729 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
730 ; AVX512DQ-BW-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
731 ; AVX512DQ-BW-FCP-NEXT: vmovd %xmm2, (%rsi)
732 ; AVX512DQ-BW-FCP-NEXT: vmovd %xmm3, (%rdx)
733 ; AVX512DQ-BW-FCP-NEXT: vmovd %xmm4, (%rcx)
734 ; AVX512DQ-BW-FCP-NEXT: vmovd %xmm5, (%r8)
735 ; AVX512DQ-BW-FCP-NEXT: vmovd %xmm6, (%r9)
736 ; AVX512DQ-BW-FCP-NEXT: vmovd %xmm0, (%rax)
737 ; AVX512DQ-BW-FCP-NEXT: retq
738 %wide.vec = load <24 x i8>, ptr %in.vec, align 64
739 %strided.vec0 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 0, i32 6, i32 12, i32 18>
740 %strided.vec1 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 1, i32 7, i32 13, i32 19>
741 %strided.vec2 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 2, i32 8, i32 14, i32 20>
742 %strided.vec3 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 3, i32 9, i32 15, i32 21>
743 %strided.vec4 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 4, i32 10, i32 16, i32 22>
744 %strided.vec5 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 5, i32 11, i32 17, i32 23>
745 store <4 x i8> %strided.vec0, ptr %out.vec0, align 64
746 store <4 x i8> %strided.vec1, ptr %out.vec1, align 64
747 store <4 x i8> %strided.vec2, ptr %out.vec2, align 64
748 store <4 x i8> %strided.vec3, ptr %out.vec3, align 64
749 store <4 x i8> %strided.vec4, ptr %out.vec4, align 64
750   store <4 x i8> %strided.vec5, ptr %out.vec5, align 64
751   ret void
752 }
754 define void @load_i8_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
755 ; SSE-LABEL: load_i8_stride6_vf8:
757 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
758 ; SSE-NEXT: movdqa (%rdi), %xmm4
759 ; SSE-NEXT: movdqa 16(%rdi), %xmm3
760 ; SSE-NEXT: movdqa 32(%rdi), %xmm0
761 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,0,65535,65535,0,65535,65535,0]
762 ; SSE-NEXT: movdqa %xmm4, %xmm1
763 ; SSE-NEXT: pand %xmm8, %xmm1
764 ; SSE-NEXT: pandn %xmm3, %xmm8
765 ; SSE-NEXT: por %xmm1, %xmm8
766 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,2,1,3]
767 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [16711935,16711935,16711935,16711935]
768 ; SSE-NEXT: pand %xmm5, %xmm1
769 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
770 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
771 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
772 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,4,7,6,7]
773 ; SSE-NEXT: packuswb %xmm6, %xmm6
774 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,65535,65535]
775 ; SSE-NEXT: pand %xmm1, %xmm6
776 ; SSE-NEXT: movdqa %xmm0, %xmm7
777 ; SSE-NEXT: pand %xmm5, %xmm7
778 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,1,2,1]
779 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,4,5,6,5]
780 ; SSE-NEXT: packuswb %xmm9, %xmm9
781 ; SSE-NEXT: movdqa %xmm1, %xmm2
782 ; SSE-NEXT: pandn %xmm9, %xmm2
783 ; SSE-NEXT: por %xmm6, %xmm2
784 ; SSE-NEXT: pxor %xmm6, %xmm6
785 ; SSE-NEXT: movdqa %xmm8, %xmm9
786 ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm6[8],xmm9[9],xmm6[9],xmm9[10],xmm6[10],xmm9[11],xmm6[11],xmm9[12],xmm6[12],xmm9[13],xmm6[13],xmm9[14],xmm6[14],xmm9[15],xmm6[15]
787 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[2,1,0,3]
788 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,1,1,1,4,5,6,7]
789 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,7,6,7]
790 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,0,65535,0,0,65535,65535]
791 ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
792 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7]
793 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3]
794 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,3,2,0,4,5,6,7]
795 ; SSE-NEXT: pand %xmm10, %xmm8
796 ; SSE-NEXT: pandn %xmm9, %xmm10
797 ; SSE-NEXT: por %xmm8, %xmm10
798 ; SSE-NEXT: packuswb %xmm10, %xmm10
799 ; SSE-NEXT: pand %xmm1, %xmm10
800 ; SSE-NEXT: movdqa %xmm0, %xmm8
801 ; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15]
802 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
803 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,2,3,3]
804 ; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
805 ; SSE-NEXT: packuswb %xmm9, %xmm9
806 ; SSE-NEXT: pandn %xmm9, %xmm1
807 ; SSE-NEXT: por %xmm10, %xmm1
808 ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,0,65535,65535,0,65535,65535]
809 ; SSE-NEXT: movdqa %xmm11, %xmm9
810 ; SSE-NEXT: pandn %xmm3, %xmm9
811 ; SSE-NEXT: movdqa %xmm4, %xmm12
812 ; SSE-NEXT: pand %xmm11, %xmm12
813 ; SSE-NEXT: por %xmm9, %xmm12
814 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm12[2,1,2,3,4,5,6,7]
815 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,4,7]
816 ; SSE-NEXT: pand %xmm5, %xmm9
817 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,3,2,3]
818 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,2,3,0,4,5,6,7]
819 ; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm9[0,1,2,3,5,5,5,5]
820 ; SSE-NEXT: packuswb %xmm13, %xmm13
821 ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255]
822 ; SSE-NEXT: pand %xmm9, %xmm13
823 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm7[0,3,2,3,4,5,6,7]
824 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3]
825 ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm10[0,1,2,3,4,4,5,6]
826 ; SSE-NEXT: packuswb %xmm14, %xmm14
827 ; SSE-NEXT: movdqa %xmm9, %xmm10
828 ; SSE-NEXT: pandn %xmm14, %xmm10
829 ; SSE-NEXT: por %xmm13, %xmm10
830 ; SSE-NEXT: movdqa %xmm12, %xmm13
831 ; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm6[0],xmm13[1],xmm6[1],xmm13[2],xmm6[2],xmm13[3],xmm6[3],xmm13[4],xmm6[4],xmm13[5],xmm6[5],xmm13[6],xmm6[6],xmm13[7],xmm6[7]
832 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm13[2,1,2,3]
833 ; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[3,1,2,1,4,5,6,7]
834 ; SSE-NEXT: movdqa {{.*#+}} xmm14 = [0,65535,65535,0,65535,65535,65535,65535]
835 ; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm6[8],xmm12[9],xmm6[9],xmm12[10],xmm6[10],xmm12[11],xmm6[11],xmm12[12],xmm6[12],xmm12[13],xmm6[13],xmm12[14],xmm6[14],xmm12[15],xmm6[15]
836 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
837 ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7]
838 ; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,7,7,7]
839 ; SSE-NEXT: pand %xmm14, %xmm12
840 ; SSE-NEXT: pandn %xmm13, %xmm14
841 ; SSE-NEXT: por %xmm12, %xmm14
842 ; SSE-NEXT: packuswb %xmm14, %xmm14
843 ; SSE-NEXT: pand %xmm9, %xmm14
844 ; SSE-NEXT: movdqa %xmm8, %xmm12
845 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm0[3,0]
846 ; SSE-NEXT: movaps %xmm0, %xmm13
847 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm12[0,2]
848 ; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm13[0,1,2,3,7,5,6,7]
849 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,1,0,2]
850 ; SSE-NEXT: packuswb %xmm13, %xmm13
851 ; SSE-NEXT: movdqa %xmm9, %xmm12
852 ; SSE-NEXT: pandn %xmm13, %xmm12
853 ; SSE-NEXT: por %xmm14, %xmm12
854 ; SSE-NEXT: pand %xmm11, %xmm3
855 ; SSE-NEXT: pandn %xmm4, %xmm11
856 ; SSE-NEXT: por %xmm3, %xmm11
857 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[3,1,2,0]
858 ; SSE-NEXT: pand %xmm5, %xmm3
859 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
860 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
861 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,1,0,3,4,5,6,7]
862 ; SSE-NEXT: packuswb %xmm4, %xmm4
863 ; SSE-NEXT: pand %xmm9, %xmm4
864 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm7[0,1,2,3,4,7,6,7]
865 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,1,0,2]
866 ; SSE-NEXT: packuswb %xmm5, %xmm5
867 ; SSE-NEXT: movdqa %xmm9, %xmm3
868 ; SSE-NEXT: pandn %xmm5, %xmm3
869 ; SSE-NEXT: por %xmm4, %xmm3
870 ; SSE-NEXT: movdqa %xmm11, %xmm4
871 ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
872 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
873 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
874 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,65535]
875 ; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3],xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
876 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm11[0,1,2,3,7,5,6,7]
877 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
878 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,1,1,2,4,5,6,7]
879 ; SSE-NEXT: pand %xmm5, %xmm6
880 ; SSE-NEXT: pandn %xmm4, %xmm5
881 ; SSE-NEXT: por %xmm6, %xmm5
882 ; SSE-NEXT: packuswb %xmm5, %xmm5
883 ; SSE-NEXT: pand %xmm9, %xmm5
884 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm8[0,0]
885 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm8[2,3]
886 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
887 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
888 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7]
889 ; SSE-NEXT: packuswb %xmm0, %xmm0
890 ; SSE-NEXT: pandn %xmm0, %xmm9
891 ; SSE-NEXT: por %xmm5, %xmm9
892 ; SSE-NEXT: movq %xmm2, (%rsi)
893 ; SSE-NEXT: movq %xmm1, (%rdx)
894 ; SSE-NEXT: movq %xmm10, (%rcx)
895 ; SSE-NEXT: movq %xmm12, (%r8)
896 ; SSE-NEXT: movq %xmm3, (%r9)
897 ; SSE-NEXT: movq %xmm9, (%rax)
900 ; AVX-LABEL: load_i8_stride6_vf8:
902 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
903 ; AVX-NEXT: vmovdqa (%rdi), %xmm1
904 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm2
905 ; AVX-NEXT: vmovdqa 32(%rdi), %xmm0
906 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm2[2,8,14,u,u,u,u,u,u,u,u,u,u]
907 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u]
908 ; AVX-NEXT: vpor %xmm3, %xmm4, %xmm3
909 ; AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
910 ; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5,6,7]
911 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,xmm0[4,10,u,u,u,u,u,u,u,u]
912 ; AVX-NEXT: vpor %xmm5, %xmm3, %xmm3
913 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm2[3,9,15,u,u,u,u,u,u,u,u,u,u]
914 ; AVX-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u]
915 ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5
916 ; AVX-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3],xmm5[4,5,6,7]
917 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,xmm0[5,11,u,u,u,u,u,u,u,u]
918 ; AVX-NEXT: vpor %xmm5, %xmm4, %xmm4
919 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u,u,u,u]
920 ; AVX-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
921 ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5
922 ; AVX-NEXT: vmovq {{.*#+}} xmm6 = [0,1,2,3,4,128,128,128,0,0,0,0,0,0,0,0]
923 ; AVX-NEXT: vpshufb %xmm6, %xmm5, %xmm5
924 ; AVX-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,zero,zero,xmm0[0,6,12,u,u,u,u,u,u,u,u]
925 ; AVX-NEXT: vpor %xmm7, %xmm5, %xmm5
926 ; AVX-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u,u,u,u]
927 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
928 ; AVX-NEXT: vpor %xmm7, %xmm8, %xmm7
929 ; AVX-NEXT: vpshufb %xmm6, %xmm7, %xmm7
930 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,zero,xmm0[1,7,13,u,u,u,u,u,u,u,u]
931 ; AVX-NEXT: vpor %xmm7, %xmm8, %xmm7
932 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = xmm1[4,10],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
933 ; AVX-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,xmm2[0,6,12,u,u,u,u,u,u,u,u,u,u,u]
934 ; AVX-NEXT: vpor %xmm8, %xmm9, %xmm8
935 ; AVX-NEXT: vpshufb %xmm6, %xmm8, %xmm8
936 ; AVX-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
937 ; AVX-NEXT: vpor %xmm9, %xmm8, %xmm8
938 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
939 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,7,13,u,u,u,u,u,u,u,u,u,u,u]
940 ; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
941 ; AVX-NEXT: vpshufb %xmm6, %xmm1, %xmm1
942 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
943 ; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
944 ; AVX-NEXT: vmovq %xmm3, (%rsi)
945 ; AVX-NEXT: vmovq %xmm4, (%rdx)
946 ; AVX-NEXT: vmovq %xmm5, (%rcx)
947 ; AVX-NEXT: vmovq %xmm7, (%r8)
948 ; AVX-NEXT: vmovq %xmm8, (%r9)
949 ; AVX-NEXT: vmovq %xmm0, (%rax)
952 ; AVX2-LABEL: load_i8_stride6_vf8:
954 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
955 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
956 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
957 ; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
958 ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
959 ; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
960 ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
961 ; AVX2-NEXT: vpor %xmm4, %xmm5, %xmm4
962 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
963 ; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
964 ; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
965 ; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
966 ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm5
967 ; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
968 ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
969 ; AVX2-NEXT: vpor %xmm6, %xmm7, %xmm6
970 ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
971 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
972 ; AVX2-NEXT: vpor %xmm5, %xmm3, %xmm3
973 ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
974 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
975 ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
976 ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
977 ; AVX2-NEXT: vpor %xmm5, %xmm7, %xmm5
978 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
979 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
980 ; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
981 ; AVX2-NEXT: vmovq %xmm4, (%rsi)
982 ; AVX2-NEXT: vmovq %xmm2, (%rdx)
983 ; AVX2-NEXT: vmovq %xmm6, (%rcx)
984 ; AVX2-NEXT: vmovq %xmm3, (%r8)
985 ; AVX2-NEXT: vmovq %xmm5, (%r9)
986 ; AVX2-NEXT: vmovq %xmm0, (%rax)
987 ; AVX2-NEXT: vzeroupper
988 ; AVX2-NEXT: retq
990 ; AVX2-FP-LABEL: load_i8_stride6_vf8:
991 ; AVX2-FP: # %bb.0:
992 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
993 ; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm0
994 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm1
995 ; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
996 ; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
997 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
998 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
999 ; AVX2-FP-NEXT: vpor %xmm4, %xmm5, %xmm4
1000 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1001 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1002 ; AVX2-FP-NEXT: vpor %xmm3, %xmm2, %xmm2
1003 ; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1004 ; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm5
1005 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1006 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1007 ; AVX2-FP-NEXT: vpor %xmm6, %xmm7, %xmm6
1008 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1009 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1010 ; AVX2-FP-NEXT: vpor %xmm5, %xmm3, %xmm3
1011 ; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1012 ; AVX2-FP-NEXT: vextracti128 $1, %ymm0, %xmm1
1013 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1014 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1015 ; AVX2-FP-NEXT: vpor %xmm5, %xmm7, %xmm5
1016 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1017 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1018 ; AVX2-FP-NEXT: vpor %xmm1, %xmm0, %xmm0
1019 ; AVX2-FP-NEXT: vmovq %xmm4, (%rsi)
1020 ; AVX2-FP-NEXT: vmovq %xmm2, (%rdx)
1021 ; AVX2-FP-NEXT: vmovq %xmm6, (%rcx)
1022 ; AVX2-FP-NEXT: vmovq %xmm3, (%r8)
1023 ; AVX2-FP-NEXT: vmovq %xmm5, (%r9)
1024 ; AVX2-FP-NEXT: vmovq %xmm0, (%rax)
1025 ; AVX2-FP-NEXT: vzeroupper
1026 ; AVX2-FP-NEXT: retq
1028 ; AVX2-FCP-LABEL: load_i8_stride6_vf8:
1029 ; AVX2-FCP: # %bb.0:
1030 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1031 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm0
1032 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
1033 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
1034 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
1035 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1036 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
1037 ; AVX2-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
1038 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1039 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1040 ; AVX2-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
1041 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1042 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
1043 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1044 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1045 ; AVX2-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
1046 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1047 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1048 ; AVX2-FCP-NEXT: vpor %xmm5, %xmm3, %xmm3
1049 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1050 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
1051 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1052 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1053 ; AVX2-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
1054 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1055 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1056 ; AVX2-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
1057 ; AVX2-FCP-NEXT: vmovq %xmm4, (%rsi)
1058 ; AVX2-FCP-NEXT: vmovq %xmm2, (%rdx)
1059 ; AVX2-FCP-NEXT: vmovq %xmm6, (%rcx)
1060 ; AVX2-FCP-NEXT: vmovq %xmm3, (%r8)
1061 ; AVX2-FCP-NEXT: vmovq %xmm5, (%r9)
1062 ; AVX2-FCP-NEXT: vmovq %xmm0, (%rax)
1063 ; AVX2-FCP-NEXT: vzeroupper
1064 ; AVX2-FCP-NEXT: retq
1066 ; AVX512-LABEL: load_i8_stride6_vf8:
1067 ; AVX512: # %bb.0:
1068 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
1069 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
1070 ; AVX512-NEXT: vmovdqa 32(%rdi), %ymm1
1071 ; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
1072 ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
1073 ; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1074 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
1075 ; AVX512-NEXT: vpor %xmm4, %xmm5, %xmm4
1076 ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1077 ; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1078 ; AVX512-NEXT: vpor %xmm3, %xmm2, %xmm2
1079 ; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1080 ; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm5
1081 ; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1082 ; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1083 ; AVX512-NEXT: vpor %xmm6, %xmm7, %xmm6
1084 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1085 ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1086 ; AVX512-NEXT: vpor %xmm5, %xmm3, %xmm3
1087 ; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1088 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
1089 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1090 ; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1091 ; AVX512-NEXT: vpor %xmm5, %xmm7, %xmm5
1092 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1093 ; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1094 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
1095 ; AVX512-NEXT: vmovq %xmm4, (%rsi)
1096 ; AVX512-NEXT: vmovq %xmm2, (%rdx)
1097 ; AVX512-NEXT: vmovq %xmm6, (%rcx)
1098 ; AVX512-NEXT: vmovq %xmm3, (%r8)
1099 ; AVX512-NEXT: vmovq %xmm5, (%r9)
1100 ; AVX512-NEXT: vmovq %xmm0, (%rax)
1101 ; AVX512-NEXT: vzeroupper
1102 ; AVX512-NEXT: retq
1104 ; AVX512-FCP-LABEL: load_i8_stride6_vf8:
1105 ; AVX512-FCP: # %bb.0:
1106 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1107 ; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm0
1108 ; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
1109 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
1110 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
1111 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1112 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
1113 ; AVX512-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
1114 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1115 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1116 ; AVX512-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
1117 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1118 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
1119 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1120 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1121 ; AVX512-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
1122 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1123 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1124 ; AVX512-FCP-NEXT: vpor %xmm5, %xmm3, %xmm3
1125 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1126 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
1127 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1128 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1129 ; AVX512-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
1130 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1131 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1132 ; AVX512-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
1133 ; AVX512-FCP-NEXT: vmovq %xmm4, (%rsi)
1134 ; AVX512-FCP-NEXT: vmovq %xmm2, (%rdx)
1135 ; AVX512-FCP-NEXT: vmovq %xmm6, (%rcx)
1136 ; AVX512-FCP-NEXT: vmovq %xmm3, (%r8)
1137 ; AVX512-FCP-NEXT: vmovq %xmm5, (%r9)
1138 ; AVX512-FCP-NEXT: vmovq %xmm0, (%rax)
1139 ; AVX512-FCP-NEXT: vzeroupper
1140 ; AVX512-FCP-NEXT: retq
1142 ; AVX512DQ-LABEL: load_i8_stride6_vf8:
1143 ; AVX512DQ: # %bb.0:
1144 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
1145 ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
1146 ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1
1147 ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
1148 ; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
1149 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1150 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
1151 ; AVX512DQ-NEXT: vpor %xmm4, %xmm5, %xmm4
1152 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1153 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1154 ; AVX512DQ-NEXT: vpor %xmm3, %xmm2, %xmm2
1155 ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1156 ; AVX512DQ-NEXT: vextracti128 $1, %ymm3, %xmm5
1157 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1158 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1159 ; AVX512DQ-NEXT: vpor %xmm6, %xmm7, %xmm6
1160 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1161 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1162 ; AVX512DQ-NEXT: vpor %xmm5, %xmm3, %xmm3
1163 ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1164 ; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
1165 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1166 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1167 ; AVX512DQ-NEXT: vpor %xmm5, %xmm7, %xmm5
1168 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1169 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1170 ; AVX512DQ-NEXT: vpor %xmm1, %xmm0, %xmm0
1171 ; AVX512DQ-NEXT: vmovq %xmm4, (%rsi)
1172 ; AVX512DQ-NEXT: vmovq %xmm2, (%rdx)
1173 ; AVX512DQ-NEXT: vmovq %xmm6, (%rcx)
1174 ; AVX512DQ-NEXT: vmovq %xmm3, (%r8)
1175 ; AVX512DQ-NEXT: vmovq %xmm5, (%r9)
1176 ; AVX512DQ-NEXT: vmovq %xmm0, (%rax)
1177 ; AVX512DQ-NEXT: vzeroupper
1178 ; AVX512DQ-NEXT: retq
1180 ; AVX512DQ-FCP-LABEL: load_i8_stride6_vf8:
1181 ; AVX512DQ-FCP: # %bb.0:
1182 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1183 ; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm0
1184 ; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
1185 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
1186 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
1187 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1188 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
1189 ; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
1190 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1191 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1192 ; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
1193 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1194 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
1195 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1196 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1197 ; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
1198 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1199 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1200 ; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm3, %xmm3
1201 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1202 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
1203 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1204 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1205 ; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
1206 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1207 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1208 ; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
1209 ; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%rsi)
1210 ; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%rdx)
1211 ; AVX512DQ-FCP-NEXT: vmovq %xmm6, (%rcx)
1212 ; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%r8)
1213 ; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%r9)
1214 ; AVX512DQ-FCP-NEXT: vmovq %xmm0, (%rax)
1215 ; AVX512DQ-FCP-NEXT: vzeroupper
1216 ; AVX512DQ-FCP-NEXT: retq
1218 ; AVX512BW-LABEL: load_i8_stride6_vf8:
1219 ; AVX512BW: # %bb.0:
1220 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1221 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
1222 ; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm1
1223 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
1224 ; AVX512BW-NEXT: vextracti128 $1, %ymm2, %xmm3
1225 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1226 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
1227 ; AVX512BW-NEXT: vpor %xmm4, %xmm5, %xmm4
1228 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1229 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1230 ; AVX512BW-NEXT: vpor %xmm3, %xmm2, %xmm2
1231 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1232 ; AVX512BW-NEXT: vextracti128 $1, %ymm3, %xmm5
1233 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1234 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1235 ; AVX512BW-NEXT: vpor %xmm6, %xmm7, %xmm6
1236 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1237 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1238 ; AVX512BW-NEXT: vpor %xmm5, %xmm3, %xmm3
1239 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1240 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
1241 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1242 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1243 ; AVX512BW-NEXT: vpor %xmm5, %xmm7, %xmm5
1244 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1245 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1246 ; AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0
1247 ; AVX512BW-NEXT: vmovq %xmm4, (%rsi)
1248 ; AVX512BW-NEXT: vmovq %xmm2, (%rdx)
1249 ; AVX512BW-NEXT: vmovq %xmm6, (%rcx)
1250 ; AVX512BW-NEXT: vmovq %xmm3, (%r8)
1251 ; AVX512BW-NEXT: vmovq %xmm5, (%r9)
1252 ; AVX512BW-NEXT: vmovq %xmm0, (%rax)
1253 ; AVX512BW-NEXT: vzeroupper
1254 ; AVX512BW-NEXT: retq
1256 ; AVX512BW-FCP-LABEL: load_i8_stride6_vf8:
1257 ; AVX512BW-FCP: # %bb.0:
1258 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1259 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
1260 ; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
1261 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
1262 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
1263 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1264 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
1265 ; AVX512BW-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
1266 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1267 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1268 ; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
1269 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1270 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
1271 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1272 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1273 ; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
1274 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1275 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1276 ; AVX512BW-FCP-NEXT: vpor %xmm5, %xmm3, %xmm3
1277 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1278 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
1279 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1280 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1281 ; AVX512BW-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
1282 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1283 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1284 ; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
1285 ; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rsi)
1286 ; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rdx)
1287 ; AVX512BW-FCP-NEXT: vmovq %xmm6, (%rcx)
1288 ; AVX512BW-FCP-NEXT: vmovq %xmm3, (%r8)
1289 ; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r9)
1290 ; AVX512BW-FCP-NEXT: vmovq %xmm0, (%rax)
1291 ; AVX512BW-FCP-NEXT: vzeroupper
1292 ; AVX512BW-FCP-NEXT: retq
1294 ; AVX512DQ-BW-LABEL: load_i8_stride6_vf8:
1295 ; AVX512DQ-BW: # %bb.0:
1296 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1297 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm0
1298 ; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm1
1299 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
1300 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm2, %xmm3
1301 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1302 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
1303 ; AVX512DQ-BW-NEXT: vpor %xmm4, %xmm5, %xmm4
1304 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1305 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1306 ; AVX512DQ-BW-NEXT: vpor %xmm3, %xmm2, %xmm2
1307 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1308 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm3, %xmm5
1309 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1310 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1311 ; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm7, %xmm6
1312 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1313 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1314 ; AVX512DQ-BW-NEXT: vpor %xmm5, %xmm3, %xmm3
1315 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1316 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm0, %xmm1
1317 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1318 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1319 ; AVX512DQ-BW-NEXT: vpor %xmm5, %xmm7, %xmm5
1320 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1321 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1322 ; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm0, %xmm0
1323 ; AVX512DQ-BW-NEXT: vmovq %xmm4, (%rsi)
1324 ; AVX512DQ-BW-NEXT: vmovq %xmm2, (%rdx)
1325 ; AVX512DQ-BW-NEXT: vmovq %xmm6, (%rcx)
1326 ; AVX512DQ-BW-NEXT: vmovq %xmm3, (%r8)
1327 ; AVX512DQ-BW-NEXT: vmovq %xmm5, (%r9)
1328 ; AVX512DQ-BW-NEXT: vmovq %xmm0, (%rax)
1329 ; AVX512DQ-BW-NEXT: vzeroupper
1330 ; AVX512DQ-BW-NEXT: retq
1332 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride6_vf8:
1333 ; AVX512DQ-BW-FCP: # %bb.0:
1334 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1335 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
1336 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
1337 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
1338 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
1339 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1340 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u]
1341 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
1342 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
1343 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u]
1344 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
1345 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
1346 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
1347 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1348 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u]
1349 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
1350 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u]
1351 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u]
1352 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm5, %xmm3, %xmm3
1353 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
1354 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
1355 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1356 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u]
1357 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
1358 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
1359 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u]
1360 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
1361 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rsi)
1362 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rdx)
1363 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm6, (%rcx)
1364 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%r8)
1365 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r9)
1366 ; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%rax)
1367 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
1368 ; AVX512DQ-BW-FCP-NEXT: retq
1369 %wide.vec = load <48 x i8>, ptr %in.vec, align 64
1370 %strided.vec0 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42>
1371 %strided.vec1 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43>
1372 %strided.vec2 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44>
1373 %strided.vec3 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45>
1374 %strided.vec4 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46>
1375 %strided.vec5 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47>
1376 store <8 x i8> %strided.vec0, ptr %out.vec0, align 64
1377 store <8 x i8> %strided.vec1, ptr %out.vec1, align 64
1378 store <8 x i8> %strided.vec2, ptr %out.vec2, align 64
1379 store <8 x i8> %strided.vec3, ptr %out.vec3, align 64
1380 store <8 x i8> %strided.vec4, ptr %out.vec4, align 64
1381 store <8 x i8> %strided.vec5, ptr %out.vec5, align 64
1382 ret void
1383 }
1385 define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
1386 ; SSE-LABEL: load_i8_stride6_vf16:
1387 ; SSE: # %bb.0:
1388 ; SSE-NEXT: movdqa 64(%rdi), %xmm10
1389 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1390 ; SSE-NEXT: movdqa (%rdi), %xmm5
1391 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
1392 ; SSE-NEXT: movdqa 32(%rdi), %xmm7
1393 ; SSE-NEXT: movdqa 48(%rdi), %xmm6
1394 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
1395 ; SSE-NEXT: movdqa %xmm4, %xmm0
1396 ; SSE-NEXT: pandn %xmm7, %xmm0
1397 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,0]
1398 ; SSE-NEXT: movdqa %xmm2, %xmm3
1399 ; SSE-NEXT: pandn %xmm6, %xmm3
1400 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1401 ; SSE-NEXT: movdqa %xmm4, %xmm3
1402 ; SSE-NEXT: pandn %xmm6, %xmm3
1403 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1404 ; SSE-NEXT: pand %xmm4, %xmm6
1405 ; SSE-NEXT: por %xmm0, %xmm6
1406 ; SSE-NEXT: movdqa %xmm6, %xmm0
1407 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
1408 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[0,3,2,3,4,5,6,7]
1409 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
1410 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
1411 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
1412 ; SSE-NEXT: packuswb %xmm3, %xmm0
1413 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,0,0,65535,65535]
1414 ; SSE-NEXT: movdqa %xmm8, %xmm9
1415 ; SSE-NEXT: pandn %xmm0, %xmm9
1416 ; SSE-NEXT: movdqa %xmm2, %xmm0
1417 ; SSE-NEXT: movdqa %xmm2, %xmm11
1418 ; SSE-NEXT: pandn %xmm1, %xmm11
1419 ; SSE-NEXT: pand %xmm4, %xmm10
1420 ; SSE-NEXT: movdqa %xmm4, %xmm2
1421 ; SSE-NEXT: pandn %xmm1, %xmm2
1422 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1423 ; SSE-NEXT: movdqa %xmm1, %xmm2
1424 ; SSE-NEXT: movdqa %xmm5, %xmm14
1425 ; SSE-NEXT: pand %xmm4, %xmm14
1426 ; SSE-NEXT: movdqa 80(%rdi), %xmm3
1427 ; SSE-NEXT: movdqa %xmm3, %xmm13
1428 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1429 ; SSE-NEXT: pand %xmm4, %xmm13
1430 ; SSE-NEXT: movdqa %xmm7, %xmm15
1431 ; SSE-NEXT: pand %xmm4, %xmm7
1432 ; SSE-NEXT: pand %xmm4, %xmm2
1433 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1434 ; SSE-NEXT: movdqa %xmm4, %xmm12
1435 ; SSE-NEXT: movdqa %xmm4, %xmm2
1436 ; SSE-NEXT: pandn %xmm5, %xmm4
1437 ; SSE-NEXT: pand %xmm0, %xmm5
1438 ; SSE-NEXT: por %xmm11, %xmm5
1439 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm5[0,2,1,3]
1440 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
1441 ; SSE-NEXT: pand %xmm1, %xmm11
1442 ; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,6,5,6,7]
1443 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,2,1,3]
1444 ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[0,3,2,1,4,5,6,7]
1445 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,7,6,7]
1446 ; SSE-NEXT: packuswb %xmm0, %xmm0
1447 ; SSE-NEXT: pand %xmm8, %xmm0
1448 ; SSE-NEXT: por %xmm9, %xmm0
1449 ; SSE-NEXT: pandn %xmm3, %xmm12
1450 ; SSE-NEXT: por %xmm12, %xmm10
1451 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm10[3,1,2,0]
1452 ; SSE-NEXT: pand %xmm1, %xmm9
1453 ; SSE-NEXT: movdqa %xmm1, %xmm3
1454 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[2,1,2,3,4,5,6,7]
1455 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,3,2,0]
1456 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,5]
1457 ; SSE-NEXT: packuswb %xmm9, %xmm9
1458 ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
1459 ; SSE-NEXT: movdqa %xmm11, %xmm12
1460 ; SSE-NEXT: pandn %xmm9, %xmm12
1461 ; SSE-NEXT: pand %xmm11, %xmm0
1462 ; SSE-NEXT: por %xmm0, %xmm12
1463 ; SSE-NEXT: pxor %xmm9, %xmm9
1464 ; SSE-NEXT: movdqa %xmm6, %xmm0
1465 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
1466 ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
1467 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,2,3,3]
1468 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
1469 ; SSE-NEXT: psrld $16, %xmm0
1470 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
1471 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7]
1472 ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
1473 ; SSE-NEXT: packuswb %xmm6, %xmm1
1474 ; SSE-NEXT: movdqa %xmm5, %xmm0
1475 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
1476 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
1477 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
1478 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,7]
1479 ; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
1480 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
1481 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
1482 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,0,4,5,6,7]
1483 ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,0,0,65535,65535]
1484 ; SSE-NEXT: pand %xmm6, %xmm5
1485 ; SSE-NEXT: pandn %xmm0, %xmm6
1486 ; SSE-NEXT: por %xmm5, %xmm6
1487 ; SSE-NEXT: packuswb %xmm6, %xmm6
1488 ; SSE-NEXT: pand %xmm8, %xmm6
1489 ; SSE-NEXT: pandn %xmm1, %xmm8
1490 ; SSE-NEXT: por %xmm8, %xmm6
1491 ; SSE-NEXT: movdqa %xmm10, %xmm0
1492 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
1493 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
1494 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
1495 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[3,1,2,3,4,5,6,7]
1496 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
1497 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4]
1498 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,0,65535,65535,0,65535]
1499 ; SSE-NEXT: pand %xmm5, %xmm1
1500 ; SSE-NEXT: pandn %xmm0, %xmm5
1501 ; SSE-NEXT: por %xmm1, %xmm5
1502 ; SSE-NEXT: packuswb %xmm5, %xmm0
1503 ; SSE-NEXT: movdqa %xmm11, %xmm10
1504 ; SSE-NEXT: pandn %xmm0, %xmm10
1505 ; SSE-NEXT: pand %xmm11, %xmm6
1506 ; SSE-NEXT: por %xmm6, %xmm10
1507 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm15
1508 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
1509 ; SSE-NEXT: movdqa %xmm15, %xmm0
1510 ; SSE-NEXT: pand %xmm3, %xmm0
1511 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7]
1512 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
1513 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7]
1514 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
1515 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
1516 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
1517 ; SSE-NEXT: packuswb %xmm1, %xmm0
1518 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
1519 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm14[2,1,2,3,4,5,6,7]
1520 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
1521 ; SSE-NEXT: pand %xmm3, %xmm1
1522 ; SSE-NEXT: movdqa %xmm3, %xmm8
1523 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
1524 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
1525 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
1526 ; SSE-NEXT: packuswb %xmm1, %xmm1
1527 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
1528 ; SSE-NEXT: movdqa %xmm3, %xmm5
1529 ; SSE-NEXT: pandn %xmm1, %xmm5
1530 ; SSE-NEXT: pand %xmm3, %xmm0
1531 ; SSE-NEXT: por %xmm0, %xmm5
1532 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1533 ; SSE-NEXT: pandn %xmm6, %xmm2
1534 ; SSE-NEXT: por %xmm2, %xmm13
1535 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[0,3,2,3,4,5,6,7]
1536 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
1537 ; SSE-NEXT: pand %xmm8, %xmm0
1538 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
1539 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
1540 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
1541 ; SSE-NEXT: packuswb %xmm0, %xmm0
1542 ; SSE-NEXT: movdqa %xmm11, %xmm8
1543 ; SSE-NEXT: pandn %xmm0, %xmm8
1544 ; SSE-NEXT: pand %xmm11, %xmm5
1545 ; SSE-NEXT: por %xmm5, %xmm8
1546 ; SSE-NEXT: movdqa %xmm15, %xmm0
1547 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
1548 ; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm9[8],xmm15[9],xmm9[9],xmm15[10],xmm9[10],xmm15[11],xmm9[11],xmm15[12],xmm9[12],xmm15[13],xmm9[13],xmm15[14],xmm9[14],xmm15[15],xmm9[15]
1549 ; SSE-NEXT: movdqa %xmm15, %xmm1
1550 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
1551 ; SSE-NEXT: movaps %xmm0, %xmm2
1552 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
1553 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm15[0,0]
1554 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[2,3]
1555 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,7,5,6,7]
1556 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
1557 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
1558 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
1559 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
1560 ; SSE-NEXT: packuswb %xmm0, %xmm1
1561 ; SSE-NEXT: movdqa %xmm14, %xmm0
1562 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
1563 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
1564 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
1565 ; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm9[8],xmm14[9],xmm9[9],xmm14[10],xmm9[10],xmm14[11],xmm9[11],xmm14[12],xmm9[12],xmm14[13],xmm9[13],xmm14[14],xmm9[14],xmm14[15],xmm9[15]
1566 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[0,3,2,1]
1567 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
1568 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
1569 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,65535,65535]
1570 ; SSE-NEXT: pand %xmm5, %xmm2
1571 ; SSE-NEXT: pandn %xmm0, %xmm5
1572 ; SSE-NEXT: por %xmm2, %xmm5
1573 ; SSE-NEXT: pand %xmm3, %xmm1
1574 ; SSE-NEXT: packuswb %xmm5, %xmm5
1575 ; SSE-NEXT: pandn %xmm5, %xmm3
1576 ; SSE-NEXT: por %xmm1, %xmm3
1577 ; SSE-NEXT: movdqa %xmm13, %xmm0
1578 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
1579 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
1580 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
1581 ; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3],xmm13[4],xmm9[4],xmm13[5],xmm9[5],xmm13[6],xmm9[6],xmm13[7],xmm9[7]
1582 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,2,0,3]
1583 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,7]
1584 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,65535,0]
1585 ; SSE-NEXT: pand %xmm2, %xmm1
1586 ; SSE-NEXT: pandn %xmm0, %xmm2
1587 ; SSE-NEXT: por %xmm1, %xmm2
1588 ; SSE-NEXT: pand %xmm11, %xmm3
1589 ; SSE-NEXT: packuswb %xmm2, %xmm0
1590 ; SSE-NEXT: pandn %xmm0, %xmm11
1591 ; SSE-NEXT: por %xmm3, %xmm11
1592 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
1593 ; SSE-NEXT: movdqa %xmm7, %xmm0
1594 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
1595 ; SSE-NEXT: pand %xmm2, %xmm0
1596 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
1597 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
1598 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
1599 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
1600 ; SSE-NEXT: packuswb %xmm1, %xmm0
1601 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
1602 ; SSE-NEXT: movdqa %xmm3, %xmm1
1603 ; SSE-NEXT: pandn %xmm0, %xmm1
1604 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
1605 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,0]
1606 ; SSE-NEXT: pand %xmm2, %xmm0
1607 ; SSE-NEXT: movdqa %xmm2, %xmm5
1608 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
1609 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
1610 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[2,1,0,3,4,5,6,7]
1611 ; SSE-NEXT: packuswb %xmm2, %xmm2
1612 ; SSE-NEXT: pand %xmm3, %xmm2
1613 ; SSE-NEXT: por %xmm1, %xmm2
1614 ; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,0,65535,65535,0,65535,65535,0]
1615 ; SSE-NEXT: movdqa %xmm6, %xmm1
1616 ; SSE-NEXT: pand %xmm13, %xmm1
1617 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
1618 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,0]
1619 ; SSE-NEXT: pand %xmm0, %xmm2
1620 ; SSE-NEXT: por %xmm1, %xmm13
1621 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,2,1,3]
1622 ; SSE-NEXT: pand %xmm5, %xmm1
1623 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7]
1624 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
1625 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
1626 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
1627 ; SSE-NEXT: packuswb %xmm1, %xmm1
1628 ; SSE-NEXT: movdqa %xmm0, %xmm6
1629 ; SSE-NEXT: pandn %xmm1, %xmm6
1630 ; SSE-NEXT: por %xmm2, %xmm6
1631 ; SSE-NEXT: movdqa %xmm7, %xmm1
1632 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
1633 ; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
1634 ; SSE-NEXT: movdqa %xmm7, %xmm2
1635 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm1[0,0]
1636 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm1[2,3]
1637 ; SSE-NEXT: psrlq $48, %xmm1
1638 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1639 ; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
1640 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[3,1,2,3,4,5,6,7]
1641 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
1642 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,7]
1643 ; SSE-NEXT: packuswb %xmm2, %xmm1
1644 ; SSE-NEXT: movdqa %xmm4, %xmm2
1645 ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
1646 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
1647 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,5,5,5,5]
1648 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,65535]
1649 ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
1650 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7]
1651 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
1652 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,1,2,4,5,6,7]
1653 ; SSE-NEXT: pand %xmm2, %xmm4
1654 ; SSE-NEXT: pandn %xmm5, %xmm2
1655 ; SSE-NEXT: por %xmm4, %xmm2
1656 ; SSE-NEXT: packuswb %xmm2, %xmm2
1657 ; SSE-NEXT: pand %xmm3, %xmm2
1658 ; SSE-NEXT: pandn %xmm1, %xmm3
1659 ; SSE-NEXT: por %xmm3, %xmm2
1660 ; SSE-NEXT: movdqa %xmm13, %xmm1
1661 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
1662 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,1,1]
1663 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
1664 ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
1665 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535,0,65535,0,0]
1666 ; SSE-NEXT: pand %xmm3, %xmm1
1667 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,7,5,6,7]
1668 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
1669 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,7,4]
1670 ; SSE-NEXT: pandn %xmm4, %xmm3
1671 ; SSE-NEXT: por %xmm1, %xmm3
1672 ; SSE-NEXT: pand %xmm0, %xmm2
1673 ; SSE-NEXT: packuswb %xmm3, %xmm1
1674 ; SSE-NEXT: pandn %xmm1, %xmm0
1675 ; SSE-NEXT: por %xmm2, %xmm0
1676 ; SSE-NEXT: movdqa %xmm12, (%rsi)
1677 ; SSE-NEXT: movdqa %xmm10, (%rdx)
1678 ; SSE-NEXT: movdqa %xmm8, (%rcx)
1679 ; SSE-NEXT: movdqa %xmm11, (%r8)
1680 ; SSE-NEXT: movdqa %xmm6, (%r9)
1681 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1682 ; SSE-NEXT: movdqa %xmm0, (%rax)
1683 ; SSE-NEXT: retq
1685 ; AVX-LABEL: load_i8_stride6_vf16:
1686 ; AVX: # %bb.0:
1687 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
1688 ; AVX-NEXT: vmovdqa (%rdi), %xmm1
1689 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm2
1690 ; AVX-NEXT: vmovdqa 32(%rdi), %xmm0
1691 ; AVX-NEXT: vmovdqa 48(%rdi), %xmm3
1692 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[u,u,4,10,u,u,u,u,u,u,u,u,u,u,u,u]
1693 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = xmm3[u,u,u,u,0,6,12,u,u,u,u,u,u,u,u,u]
1694 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
1695 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm2[2,8,14,u,u,u,u,u,u,u,u,u,u]
1696 ; AVX-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u]
1697 ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5
1698 ; AVX-NEXT: vpblendw {{.*#+}} xmm6 = xmm5[0,1,2],xmm4[3,4,5],xmm5[6,7]
1699 ; AVX-NEXT: vmovdqa 80(%rdi), %xmm4
1700 ; AVX-NEXT: vpshufb {{.*#+}} xmm7 = xmm4[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm4[4,10]
1701 ; AVX-NEXT: vmovdqa 64(%rdi), %xmm5
1702 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
1703 ; AVX-NEXT: vpor %xmm7, %xmm8, %xmm7
1704 ; AVX-NEXT: vpmovsxdq {{.*#+}} xmm9 = [18446744073709551615,16777215]
1705 ; AVX-NEXT: vpblendvb %xmm9, %xmm6, %xmm7, %xmm6
1706 ; AVX-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,5,11,u,u,u,u,u,u,u,u,u,u,u,u]
1707 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[u,u,u,u,1,7,13,u,u,u,u,u,u,u,u,u]
1708 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
1709 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm2[3,9,15,u,u,u,u,u,u,u,u,u,u]
1710 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u]
1711 ; AVX-NEXT: vpor %xmm8, %xmm10, %xmm8
1712 ; AVX-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3,4,5],xmm8[6,7]
1713 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = xmm4[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm4[5,11]
1714 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
1715 ; AVX-NEXT: vpor %xmm8, %xmm10, %xmm8
1716 ; AVX-NEXT: vpblendvb %xmm9, %xmm7, %xmm8, %xmm7
1717 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[2,8,14,u,u,u,u,u,u,u,u,u,u,u,u,u]
1718 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm0[u,u,u,u,u,0,6,12,u,u,u,u,u,u,u,u]
1719 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm10[0],xmm8[0]
1720 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u,u,u,u]
1721 ; AVX-NEXT: vpshufb {{.*#+}} xmm11 = xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
1722 ; AVX-NEXT: vpor %xmm10, %xmm11, %xmm10
1723 ; AVX-NEXT: vmovdqa {{.*#+}} xmm11 = [0,0,0,0,0,255,255,255,255,255,255,u,u,u,u,u]
1724 ; AVX-NEXT: vpblendvb %xmm11, %xmm8, %xmm10, %xmm8
1725 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
1726 ; AVX-NEXT: vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm4[0,6,12]
1727 ; AVX-NEXT: vpor %xmm10, %xmm12, %xmm10
1728 ; AVX-NEXT: vpblendvb %xmm9, %xmm8, %xmm10, %xmm8
1729 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm3[3,9,15,u,u,u,u,u,u,u,u,u,u,u,u,u]
1730 ; AVX-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,1,7,13,u,u,u,u,u,u,u,u]
1731 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm10 = xmm12[0],xmm10[0]
1732 ; AVX-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u,u,u,u]
1733 ; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
1734 ; AVX-NEXT: vpor %xmm12, %xmm13, %xmm12
1735 ; AVX-NEXT: vpblendvb %xmm11, %xmm10, %xmm12, %xmm10
1736 ; AVX-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
1737 ; AVX-NEXT: vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm4[1,7,13]
1738 ; AVX-NEXT: vpor %xmm11, %xmm12, %xmm11
1739 ; AVX-NEXT: vpblendvb %xmm9, %xmm10, %xmm11, %xmm9
1740 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[4,10],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
1741 ; AVX-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm2[0,6,12,u,u,u,u,u,u,u,u,u,u,u]
1742 ; AVX-NEXT: vpor %xmm10, %xmm11, %xmm10
1743 ; AVX-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,u,u,u,4,10,u,u,u,u,u,u]
1744 ; AVX-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,2,8,14]
1745 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm12[1],xmm11[1]
1746 ; AVX-NEXT: vmovq {{.*#+}} xmm12 = [255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
1747 ; AVX-NEXT: vpblendvb %xmm12, %xmm10, %xmm11, %xmm10
1748 ; AVX-NEXT: vpshufb {{.*#+}} xmm11 = xmm4[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm4[2,8,14]
1749 ; AVX-NEXT: vpshufb {{.*#+}} xmm13 = xmm5[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
1750 ; AVX-NEXT: vpor %xmm11, %xmm13, %xmm11
1751 ; AVX-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm11[5,6,7]
1752 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
1753 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,7,13,u,u,u,u,u,u,u,u,u,u,u]
1754 ; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
1755 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,5,11,u,u,u,u,u,u]
1756 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,3,9,15]
1757 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
1758 ; AVX-NEXT: vpblendvb %xmm12, %xmm1, %xmm0, %xmm0
1759 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm4[3,9,15]
1760 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm5[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
1761 ; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
1762 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
1763 ; AVX-NEXT: vmovdqa %xmm6, (%rsi)
1764 ; AVX-NEXT: vmovdqa %xmm7, (%rdx)
1765 ; AVX-NEXT: vmovdqa %xmm8, (%rcx)
1766 ; AVX-NEXT: vmovdqa %xmm9, (%r8)
1767 ; AVX-NEXT: vmovdqa %xmm10, (%r9)
1768 ; AVX-NEXT: vmovdqa %xmm0, (%rax)
1769 ; AVX-NEXT: retq
1771 ; AVX2-LABEL: load_i8_stride6_vf16:
1772 ; AVX2: # %bb.0:
1773 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
1774 ; AVX2-NEXT: vmovdqa (%rdi), %ymm3
1775 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm4
1776 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm0 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
1777 ; AVX2-NEXT: vpblendvb %ymm0, %ymm3, %ymm4, %ymm5
1778 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
1779 ; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
1780 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
1781 ; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm2
1782 ; AVX2-NEXT: vmovdqa 80(%rdi), %xmm0
1783 ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[4,10]
1784 ; AVX2-NEXT: vmovdqa 64(%rdi), %xmm1
1785 ; AVX2-NEXT: vpshufb {{.*#+}} xmm8 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
1786 ; AVX2-NEXT: vpor %xmm7, %xmm8, %xmm7
1787 ; AVX2-NEXT: vpmovsxdq {{.*#+}} xmm8 = [18446744073709551615,16777215]
1788 ; AVX2-NEXT: vpblendvb %xmm8, %xmm2, %xmm7, %xmm2
1789 ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
1790 ; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
1791 ; AVX2-NEXT: vpor %xmm5, %xmm6, %xmm5
1792 ; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,11]
1793 ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
1794 ; AVX2-NEXT: vpor %xmm6, %xmm7, %xmm6
1795 ; AVX2-NEXT: vpblendvb %xmm8, %xmm5, %xmm6, %xmm5
1796 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm6 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
1797 ; AVX2-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm6
1798 ; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm7
1799 ; AVX2-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[2,8,14,u,u,u,u,u]
1800 ; AVX2-NEXT: vpshufb {{.*#+}} xmm10 = xmm6[2,8,14],zero,zero,xmm6[0,6,12],zero,zero,zero,xmm6[u,u,u,u,u]
1801 ; AVX2-NEXT: vpor %xmm9, %xmm10, %xmm9
1802 ; AVX2-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
1803 ; AVX2-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,6,12]
1804 ; AVX2-NEXT: vpor %xmm10, %xmm11, %xmm10
1805 ; AVX2-NEXT: vpblendvb %xmm8, %xmm9, %xmm10, %xmm9
1806 ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[3,9,15,u,u,u,u,u]
1807 ; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[3,9,15],zero,zero,xmm6[1,7,13],zero,zero,zero,xmm6[u,u,u,u,u]
1808 ; AVX2-NEXT: vpor %xmm7, %xmm6, %xmm6
1809 ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
1810 ; AVX2-NEXT: vpshufb {{.*#+}} xmm10 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,7,13]
1811 ; AVX2-NEXT: vpor %xmm7, %xmm10, %xmm7
1812 ; AVX2-NEXT: vpblendvb %xmm8, %xmm6, %xmm7, %xmm6
1813 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm7 = [0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0]
1814 ; AVX2-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
1815 ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
1816 ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
1817 ; AVX2-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[4,10],zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u]
1818 ; AVX2-NEXT: vpor %xmm7, %xmm8, %xmm7
1819 ; AVX2-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[2,8,14]
1820 ; AVX2-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
1821 ; AVX2-NEXT: vpor %xmm8, %xmm10, %xmm8
1822 ; AVX2-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm8[5,6,7]
1823 ; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
1824 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[5,11],zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u]
1825 ; AVX2-NEXT: vpor %xmm4, %xmm3, %xmm3
1826 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[3,9,15]
1827 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
1828 ; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
1829 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
1830 ; AVX2-NEXT: vmovdqa %xmm2, (%rsi)
1831 ; AVX2-NEXT: vmovdqa %xmm5, (%rdx)
1832 ; AVX2-NEXT: vmovdqa %xmm9, (%rcx)
1833 ; AVX2-NEXT: vmovdqa %xmm6, (%r8)
1834 ; AVX2-NEXT: vmovdqa %xmm7, (%r9)
1835 ; AVX2-NEXT: vmovdqa %xmm0, (%rax)
1836 ; AVX2-NEXT: vzeroupper
1837 ; AVX2-NEXT: retq
1839 ; AVX2-FP-LABEL: load_i8_stride6_vf16:
1840 ; AVX2-FP: # %bb.0:
1841 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1842 ; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm3
1843 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm4
1844 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm0 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
1845 ; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm3, %ymm4, %ymm5
1846 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
1847 ; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm6
1848 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
1849 ; AVX2-FP-NEXT: vpor %xmm0, %xmm1, %xmm2
1850 ; AVX2-FP-NEXT: vmovdqa 80(%rdi), %xmm0
1851 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[4,10]
1852 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %xmm1
1853 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm8 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
1854 ; AVX2-FP-NEXT: vpor %xmm7, %xmm8, %xmm7
1855 ; AVX2-FP-NEXT: vpmovsxdq {{.*#+}} xmm8 = [18446744073709551615,16777215]
1856 ; AVX2-FP-NEXT: vpblendvb %xmm8, %xmm2, %xmm7, %xmm2
1857 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
1858 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
1859 ; AVX2-FP-NEXT: vpor %xmm5, %xmm6, %xmm5
1860 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,11]
1861 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
1862 ; AVX2-FP-NEXT: vpor %xmm6, %xmm7, %xmm6
1863 ; AVX2-FP-NEXT: vpblendvb %xmm8, %xmm5, %xmm6, %xmm5
1864 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm6 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
1865 ; AVX2-FP-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm6
1866 ; AVX2-FP-NEXT: vextracti128 $1, %ymm6, %xmm7
1867 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[2,8,14,u,u,u,u,u]
1868 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = xmm6[2,8,14],zero,zero,xmm6[0,6,12],zero,zero,zero,xmm6[u,u,u,u,u]
1869 ; AVX2-FP-NEXT: vpor %xmm9, %xmm10, %xmm9
1870 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
1871 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,6,12]
1872 ; AVX2-FP-NEXT: vpor %xmm10, %xmm11, %xmm10
1873 ; AVX2-FP-NEXT: vpblendvb %xmm8, %xmm9, %xmm10, %xmm9
1874 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[3,9,15,u,u,u,u,u]
1875 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[3,9,15],zero,zero,xmm6[1,7,13],zero,zero,zero,xmm6[u,u,u,u,u]
1876 ; AVX2-FP-NEXT: vpor %xmm7, %xmm6, %xmm6
1877 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
1878 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,7,13]
1879 ; AVX2-FP-NEXT: vpor %xmm7, %xmm10, %xmm7
1880 ; AVX2-FP-NEXT: vpblendvb %xmm8, %xmm6, %xmm7, %xmm6
1881 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0]
1882 ; AVX2-FP-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
1883 ; AVX2-FP-NEXT: vextracti128 $1, %ymm3, %xmm4
1884 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
1885 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[4,10],zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u]
1886 ; AVX2-FP-NEXT: vpor %xmm7, %xmm8, %xmm7
1887 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[2,8,14]
1888 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
1889 ; AVX2-FP-NEXT: vpor %xmm8, %xmm10, %xmm8
1890 ; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm8[5,6,7]
1891 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
1892 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[5,11],zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u]
1893 ; AVX2-FP-NEXT: vpor %xmm4, %xmm3, %xmm3
1894 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[3,9,15]
1895 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
1896 ; AVX2-FP-NEXT: vpor %xmm0, %xmm1, %xmm0
1897 ; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
1898 ; AVX2-FP-NEXT: vmovdqa %xmm2, (%rsi)
1899 ; AVX2-FP-NEXT: vmovdqa %xmm5, (%rdx)
1900 ; AVX2-FP-NEXT: vmovdqa %xmm9, (%rcx)
1901 ; AVX2-FP-NEXT: vmovdqa %xmm6, (%r8)
1902 ; AVX2-FP-NEXT: vmovdqa %xmm7, (%r9)
1903 ; AVX2-FP-NEXT: vmovdqa %xmm0, (%rax)
1904 ; AVX2-FP-NEXT: vzeroupper
1905 ; AVX2-FP-NEXT: retq
1907 ; AVX2-FCP-LABEL: load_i8_stride6_vf16:
1908 ; AVX2-FCP: # %bb.0:
1909 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
1910 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm3
1911 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
1912 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm0 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
1913 ; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm3, %ymm4, %ymm5
1914 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
1915 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
1916 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
1917 ; AVX2-FCP-NEXT: vpor %xmm0, %xmm1, %xmm2
1918 ; AVX2-FCP-NEXT: vmovdqa 80(%rdi), %xmm0
1919 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[4,10]
1920 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
1921 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
1922 ; AVX2-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
1923 ; AVX2-FCP-NEXT: vpmovsxdq {{.*#+}} xmm8 = [18446744073709551615,16777215]
1924 ; AVX2-FCP-NEXT: vpblendvb %xmm8, %xmm2, %xmm7, %xmm2
1925 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
1926 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
1927 ; AVX2-FCP-NEXT: vpor %xmm5, %xmm6, %xmm5
1928 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,11]
1929 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
1930 ; AVX2-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
1931 ; AVX2-FCP-NEXT: vpblendvb %xmm8, %xmm5, %xmm6, %xmm5
1932 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm6 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
1933 ; AVX2-FCP-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm6
1934 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
1935 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[2,8,14,u,u,u,u,u]
1936 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm6[2,8,14],zero,zero,xmm6[0,6,12],zero,zero,zero,xmm6[u,u,u,u,u]
1937 ; AVX2-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
1938 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
1939 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,6,12]
1940 ; AVX2-FCP-NEXT: vpor %xmm10, %xmm11, %xmm10
1941 ; AVX2-FCP-NEXT: vpblendvb %xmm8, %xmm9, %xmm10, %xmm9
1942 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[3,9,15,u,u,u,u,u]
1943 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[3,9,15],zero,zero,xmm6[1,7,13],zero,zero,zero,xmm6[u,u,u,u,u]
1944 ; AVX2-FCP-NEXT: vpor %xmm7, %xmm6, %xmm6
1945 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
1946 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,7,13]
1947 ; AVX2-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
1948 ; AVX2-FCP-NEXT: vpblendvb %xmm8, %xmm6, %xmm7, %xmm6
1949 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0]
1950 ; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
1951 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm3, %xmm4
1952 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
1953 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[4,10],zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u]
1954 ; AVX2-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
1955 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[2,8,14]
1956 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
1957 ; AVX2-FCP-NEXT: vpor %xmm8, %xmm10, %xmm8
1958 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm8[5,6,7]
1959 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
1960 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[5,11],zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u]
1961 ; AVX2-FCP-NEXT: vpor %xmm4, %xmm3, %xmm3
1962 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[3,9,15]
1963 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
1964 ; AVX2-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
1965 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
1966 ; AVX2-FCP-NEXT: vmovdqa %xmm2, (%rsi)
1967 ; AVX2-FCP-NEXT: vmovdqa %xmm5, (%rdx)
1968 ; AVX2-FCP-NEXT: vmovdqa %xmm9, (%rcx)
1969 ; AVX2-FCP-NEXT: vmovdqa %xmm6, (%r8)
1970 ; AVX2-FCP-NEXT: vmovdqa %xmm7, (%r9)
1971 ; AVX2-FCP-NEXT: vmovdqa %xmm0, (%rax)
1972 ; AVX2-FCP-NEXT: vzeroupper
1973 ; AVX2-FCP-NEXT: retq
1975 ; AVX512-LABEL: load_i8_stride6_vf16:
1976 ; AVX512: # %bb.0:
1977 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
1978 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
1979 ; AVX512-NEXT: vmovdqa (%rdi), %ymm3
1980 ; AVX512-NEXT: vmovdqa 32(%rdi), %ymm4
1981 ; AVX512-NEXT: vmovdqa %ymm0, %ymm5
1982 ; AVX512-NEXT: vpternlogq $202, %ymm4, %ymm3, %ymm5
1983 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
1984 ; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm6
1985 ; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
1986 ; AVX512-NEXT: vpor %xmm1, %xmm2, %xmm7
1987 ; AVX512-NEXT: vmovdqa 80(%rdi), %xmm2
1988 ; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[4,10]
1989 ; AVX512-NEXT: vmovdqa 64(%rdi), %xmm1
1990 ; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
1991 ; AVX512-NEXT: vpor %xmm8, %xmm9, %xmm8
1992 ; AVX512-NEXT: vpmovsxdq {{.*#+}} xmm9 = [18446744073709551615,16777215]
1993 ; AVX512-NEXT: vpternlogq $184, %xmm7, %xmm9, %xmm8
1994 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
1995 ; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
1996 ; AVX512-NEXT: vpor %xmm5, %xmm6, %xmm5
1997 ; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[5,11]
1998 ; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
1999 ; AVX512-NEXT: vpor %xmm6, %xmm7, %xmm6
2000 ; AVX512-NEXT: vpternlogq $184, %xmm5, %xmm9, %xmm6
2001 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
2002 ; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[0,6,12]
2003 ; AVX512-NEXT: vpor %xmm5, %xmm7, %xmm5
2004 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
2005 ; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm7
2006 ; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm10
2007 ; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u]
2008 ; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm7[2,8,14],zero,zero,xmm7[0,6,12],zero,zero,zero,xmm7[u,u,u,u,u]
2009 ; AVX512-NEXT: vpor %xmm11, %xmm12, %xmm11
2010 ; AVX512-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm11
2011 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
2012 ; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[1,7,13]
2013 ; AVX512-NEXT: vpor %xmm5, %xmm12, %xmm5
2014 ; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u]
2015 ; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u]
2016 ; AVX512-NEXT: vpor %xmm7, %xmm10, %xmm7
2017 ; AVX512-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm7
2018 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
2019 ; AVX512-NEXT: vpshufb %xmm5, %xmm2, %xmm9
2020 ; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm0
2021 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
2022 ; AVX512-NEXT: vpshufb %xmm5, %xmm3, %xmm4
2023 ; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7]
2024 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
2025 ; AVX512-NEXT: vpshufb %xmm5, %xmm1, %xmm9
2026 ; AVX512-NEXT: vpshufb %xmm5, %xmm0, %xmm5
2027 ; AVX512-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
2028 ; AVX512-NEXT: vpor %xmm4, %xmm5, %xmm4
2029 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
2030 ; AVX512-NEXT: vpshufb %xmm5, %xmm2, %xmm2
2031 ; AVX512-NEXT: vpshufb %xmm5, %xmm3, %xmm3
2032 ; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
2033 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
2034 ; AVX512-NEXT: vpshufb %xmm3, %xmm1, %xmm1
2035 ; AVX512-NEXT: vpshufb %xmm3, %xmm0, %xmm0
2036 ; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
2037 ; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0
2038 ; AVX512-NEXT: vmovdqa %xmm8, (%rsi)
2039 ; AVX512-NEXT: vmovdqa %xmm6, (%rdx)
2040 ; AVX512-NEXT: vmovdqa %xmm11, (%rcx)
2041 ; AVX512-NEXT: vmovdqa %xmm7, (%r8)
2042 ; AVX512-NEXT: vmovdqa %xmm4, (%r9)
2043 ; AVX512-NEXT: vmovdqa %xmm0, (%rax)
2044 ; AVX512-NEXT: vzeroupper
2045 ; AVX512-NEXT: retq
2047 ; AVX512-FCP-LABEL: load_i8_stride6_vf16:
2048 ; AVX512-FCP: # %bb.0:
2049 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
2050 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
2051 ; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm3
2052 ; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
2053 ; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm5
2054 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm4, %ymm3, %ymm5
2055 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
2056 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
2057 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
2058 ; AVX512-FCP-NEXT: vpor %xmm1, %xmm2, %xmm7
2059 ; AVX512-FCP-NEXT: vmovdqa 80(%rdi), %xmm2
2060 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[4,10]
2061 ; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
2062 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
2063 ; AVX512-FCP-NEXT: vpor %xmm8, %xmm9, %xmm8
2064 ; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} xmm9 = [18446744073709551615,16777215]
2065 ; AVX512-FCP-NEXT: vpternlogq $184, %xmm7, %xmm9, %xmm8
2066 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
2067 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
2068 ; AVX512-FCP-NEXT: vpor %xmm5, %xmm6, %xmm5
2069 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[5,11]
2070 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
2071 ; AVX512-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
2072 ; AVX512-FCP-NEXT: vpternlogq $184, %xmm5, %xmm9, %xmm6
2073 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
2074 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[0,6,12]
2075 ; AVX512-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
2076 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
2077 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm7
2078 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm10
2079 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u]
2080 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm7[2,8,14],zero,zero,xmm7[0,6,12],zero,zero,zero,xmm7[u,u,u,u,u]
2081 ; AVX512-FCP-NEXT: vpor %xmm11, %xmm12, %xmm11
2082 ; AVX512-FCP-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm11
2083 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
2084 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[1,7,13]
2085 ; AVX512-FCP-NEXT: vpor %xmm5, %xmm12, %xmm5
2086 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u]
2087 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u]
2088 ; AVX512-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
2089 ; AVX512-FCP-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm7
2090 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
2091 ; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm9
2092 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm0
2093 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
2094 ; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm4
2095 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7]
2096 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
2097 ; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm9
2098 ; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
2099 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
2100 ; AVX512-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
2101 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
2102 ; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
2103 ; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
2104 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
2105 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
2106 ; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
2107 ; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
2108 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
2109 ; AVX512-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
2110 ; AVX512-FCP-NEXT: vmovdqa %xmm8, (%rsi)
2111 ; AVX512-FCP-NEXT: vmovdqa %xmm6, (%rdx)
2112 ; AVX512-FCP-NEXT: vmovdqa %xmm11, (%rcx)
2113 ; AVX512-FCP-NEXT: vmovdqa %xmm7, (%r8)
2114 ; AVX512-FCP-NEXT: vmovdqa %xmm4, (%r9)
2115 ; AVX512-FCP-NEXT: vmovdqa %xmm0, (%rax)
2116 ; AVX512-FCP-NEXT: vzeroupper
2117 ; AVX512-FCP-NEXT: retq
2119 ; AVX512DQ-LABEL: load_i8_stride6_vf16:
2120 ; AVX512DQ: # %bb.0:
2121 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
2122 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
2123 ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm3
2124 ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm4
2125 ; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm5
2126 ; AVX512DQ-NEXT: vpternlogq $202, %ymm4, %ymm3, %ymm5
2127 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
2128 ; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm6
2129 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
2130 ; AVX512DQ-NEXT: vpor %xmm1, %xmm2, %xmm7
2131 ; AVX512DQ-NEXT: vmovdqa 80(%rdi), %xmm2
2132 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[4,10]
2133 ; AVX512DQ-NEXT: vmovdqa 64(%rdi), %xmm1
2134 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
2135 ; AVX512DQ-NEXT: vpor %xmm8, %xmm9, %xmm8
2136 ; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} xmm9 = [18446744073709551615,16777215]
2137 ; AVX512DQ-NEXT: vpternlogq $184, %xmm7, %xmm9, %xmm8
2138 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
2139 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
2140 ; AVX512DQ-NEXT: vpor %xmm5, %xmm6, %xmm5
2141 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[5,11]
2142 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
2143 ; AVX512DQ-NEXT: vpor %xmm6, %xmm7, %xmm6
2144 ; AVX512DQ-NEXT: vpternlogq $184, %xmm5, %xmm9, %xmm6
2145 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
2146 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[0,6,12]
2147 ; AVX512DQ-NEXT: vpor %xmm5, %xmm7, %xmm5
2148 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
2149 ; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm7
2150 ; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm10
2151 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u]
2152 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm7[2,8,14],zero,zero,xmm7[0,6,12],zero,zero,zero,xmm7[u,u,u,u,u]
2153 ; AVX512DQ-NEXT: vpor %xmm11, %xmm12, %xmm11
2154 ; AVX512DQ-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm11
2155 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
2156 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[1,7,13]
2157 ; AVX512DQ-NEXT: vpor %xmm5, %xmm12, %xmm5
2158 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u]
2159 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u]
2160 ; AVX512DQ-NEXT: vpor %xmm7, %xmm10, %xmm7
2161 ; AVX512DQ-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm7
2162 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
2163 ; AVX512DQ-NEXT: vpshufb %xmm5, %xmm2, %xmm9
2164 ; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm0
2165 ; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
2166 ; AVX512DQ-NEXT: vpshufb %xmm5, %xmm3, %xmm4
2167 ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7]
2168 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
2169 ; AVX512DQ-NEXT: vpshufb %xmm5, %xmm1, %xmm9
2170 ; AVX512DQ-NEXT: vpshufb %xmm5, %xmm0, %xmm5
2171 ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
2172 ; AVX512DQ-NEXT: vpor %xmm4, %xmm5, %xmm4
2173 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
2174 ; AVX512DQ-NEXT: vpshufb %xmm5, %xmm2, %xmm2
2175 ; AVX512DQ-NEXT: vpshufb %xmm5, %xmm3, %xmm3
2176 ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
2177 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm3 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
2178 ; AVX512DQ-NEXT: vpshufb %xmm3, %xmm1, %xmm1
2179 ; AVX512DQ-NEXT: vpshufb %xmm3, %xmm0, %xmm0
2180 ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
2181 ; AVX512DQ-NEXT: vpor %xmm2, %xmm0, %xmm0
2182 ; AVX512DQ-NEXT: vmovdqa %xmm8, (%rsi)
2183 ; AVX512DQ-NEXT: vmovdqa %xmm6, (%rdx)
2184 ; AVX512DQ-NEXT: vmovdqa %xmm11, (%rcx)
2185 ; AVX512DQ-NEXT: vmovdqa %xmm7, (%r8)
2186 ; AVX512DQ-NEXT: vmovdqa %xmm4, (%r9)
2187 ; AVX512DQ-NEXT: vmovdqa %xmm0, (%rax)
2188 ; AVX512DQ-NEXT: vzeroupper
2189 ; AVX512DQ-NEXT: retq
2191 ; AVX512DQ-FCP-LABEL: load_i8_stride6_vf16:
2192 ; AVX512DQ-FCP: # %bb.0:
2193 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
2194 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
2195 ; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm3
2196 ; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
2197 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm5
2198 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm4, %ymm3, %ymm5
2199 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
2200 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
2201 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
2202 ; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm2, %xmm7
2203 ; AVX512DQ-FCP-NEXT: vmovdqa 80(%rdi), %xmm2
2204 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[4,10]
2205 ; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
2206 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
2207 ; AVX512DQ-FCP-NEXT: vpor %xmm8, %xmm9, %xmm8
2208 ; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} xmm9 = [18446744073709551615,16777215]
2209 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %xmm7, %xmm9, %xmm8
2210 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
2211 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
2212 ; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm6, %xmm5
2213 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[5,11]
2214 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
2215 ; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
2216 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %xmm5, %xmm9, %xmm6
2217 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
2218 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[0,6,12]
2219 ; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm7, %xmm5
2220 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
2221 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm7
2222 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm10
2223 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u]
2224 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm7[2,8,14],zero,zero,xmm7[0,6,12],zero,zero,zero,xmm7[u,u,u,u,u]
2225 ; AVX512DQ-FCP-NEXT: vpor %xmm11, %xmm12, %xmm11
2226 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm11
2227 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
2228 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[1,7,13]
2229 ; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm12, %xmm5
2230 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u]
2231 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u]
2232 ; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
2233 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm7
2234 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
2235 ; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm9
2236 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm0
2237 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
2238 ; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm4
2239 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7]
2240 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
2241 ; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm1, %xmm9
2242 ; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm5
2243 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
2244 ; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
2245 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
2246 ; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm2
2247 ; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm3
2248 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
2249 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
2250 ; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
2251 ; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm0, %xmm0
2252 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
2253 ; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
2254 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm8, (%rsi)
2255 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, (%rdx)
2256 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm11, (%rcx)
2257 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm7, (%r8)
2258 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm4, (%r9)
2259 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, (%rax)
2260 ; AVX512DQ-FCP-NEXT: vzeroupper
2261 ; AVX512DQ-FCP-NEXT: retq
2263 ; AVX512BW-LABEL: load_i8_stride6_vf16:
2264 ; AVX512BW: # %bb.0:
2265 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
2266 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
2267 ; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm0
2268 ; AVX512BW-NEXT: movw $18724, %r10w # imm = 0x4924
2269 ; AVX512BW-NEXT: kmovd %r10d, %k1
2270 ; AVX512BW-NEXT: vpblendmw %ymm0, %ymm1, %ymm2 {%k1}
2271 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u]
2272 ; AVX512BW-NEXT: vextracti128 $1, %ymm2, %xmm4
2273 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u]
2274 ; AVX512BW-NEXT: vpor %xmm3, %xmm5, %xmm3
2275 ; AVX512BW-NEXT: vmovdqa 80(%rdi), %xmm5
2276 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[4,10]
2277 ; AVX512BW-NEXT: vmovdqa 64(%rdi), %xmm7
2278 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
2279 ; AVX512BW-NEXT: vpor %xmm6, %xmm8, %xmm6
2280 ; AVX512BW-NEXT: movw $-2048, %di # imm = 0xF800
2281 ; AVX512BW-NEXT: kmovd %edi, %k2
2282 ; AVX512BW-NEXT: vmovdqu8 %xmm6, %xmm3 {%k2}
2283 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[u,u,u,u,u]
2284 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[3,9,15],zero,zero,xmm4[1,7,13,u,u,u,u,u]
2285 ; AVX512BW-NEXT: vpor %xmm2, %xmm4, %xmm2
2286 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,11]
2287 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
2288 ; AVX512BW-NEXT: vpor %xmm4, %xmm6, %xmm4
2289 ; AVX512BW-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2}
2290 ; AVX512BW-NEXT: movw $9362, %di # imm = 0x2492
2291 ; AVX512BW-NEXT: kmovd %edi, %k3
2292 ; AVX512BW-NEXT: vpblendmw %ymm1, %ymm0, %ymm4 {%k3}
2293 ; AVX512BW-NEXT: vextracti128 $1, %ymm4, %xmm6
2294 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
2295 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u]
2296 ; AVX512BW-NEXT: vpor %xmm8, %xmm9, %xmm8
2297 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm7[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
2298 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,6,12]
2299 ; AVX512BW-NEXT: vpor %xmm9, %xmm10, %xmm9
2300 ; AVX512BW-NEXT: vmovdqu8 %xmm9, %xmm8 {%k2}
2301 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15,u,u,u,u,u]
2302 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[u,u,u,u,u]
2303 ; AVX512BW-NEXT: vpor %xmm6, %xmm4, %xmm4
2304 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
2305 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13]
2306 ; AVX512BW-NEXT: vpor %xmm6, %xmm9, %xmm6
2307 ; AVX512BW-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2}
2308 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
2309 ; AVX512BW-NEXT: vpshufb %xmm6, %xmm5, %xmm9
2310 ; AVX512BW-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
2311 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
2312 ; AVX512BW-NEXT: vpshufb %xmm6, %xmm1, %xmm6
2313 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7]
2314 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
2315 ; AVX512BW-NEXT: vpshufb %xmm9, %xmm7, %xmm10
2316 ; AVX512BW-NEXT: vpshufb %xmm9, %xmm0, %xmm9
2317 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
2318 ; AVX512BW-NEXT: vpor %xmm6, %xmm9, %xmm6
2319 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
2320 ; AVX512BW-NEXT: vpshufb %xmm9, %xmm5, %xmm5
2321 ; AVX512BW-NEXT: vpshufb %xmm9, %xmm1, %xmm1
2322 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
2323 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm5 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
2324 ; AVX512BW-NEXT: vpshufb %xmm5, %xmm7, %xmm7
2325 ; AVX512BW-NEXT: vpshufb %xmm5, %xmm0, %xmm0
2326 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
2327 ; AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0
2328 ; AVX512BW-NEXT: vmovdqa %xmm3, (%rsi)
2329 ; AVX512BW-NEXT: vmovdqa %xmm2, (%rdx)
2330 ; AVX512BW-NEXT: vmovdqa %xmm8, (%rcx)
2331 ; AVX512BW-NEXT: vmovdqa %xmm4, (%r8)
2332 ; AVX512BW-NEXT: vmovdqa %xmm6, (%r9)
2333 ; AVX512BW-NEXT: vmovdqa %xmm0, (%rax)
2334 ; AVX512BW-NEXT: vzeroupper
2335 ; AVX512BW-NEXT: retq
2337 ; AVX512BW-FCP-LABEL: load_i8_stride6_vf16:
2338 ; AVX512BW-FCP: # %bb.0:
2339 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
2340 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
2341 ; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm0
2342 ; AVX512BW-FCP-NEXT: movw $18724, %r10w # imm = 0x4924
2343 ; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
2344 ; AVX512BW-FCP-NEXT: vpblendmw %ymm0, %ymm1, %ymm2 {%k1}
2345 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u]
2346 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
2347 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u]
2348 ; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm3
2349 ; AVX512BW-FCP-NEXT: vmovdqa 80(%rdi), %xmm5
2350 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[4,10]
2351 ; AVX512BW-FCP-NEXT: vmovdqa 64(%rdi), %xmm7
2352 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
2353 ; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
2354 ; AVX512BW-FCP-NEXT: movw $-2048, %di # imm = 0xF800
2355 ; AVX512BW-FCP-NEXT: kmovd %edi, %k2
2356 ; AVX512BW-FCP-NEXT: vmovdqu8 %xmm6, %xmm3 {%k2}
2357 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[u,u,u,u,u]
2358 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[3,9,15],zero,zero,xmm4[1,7,13,u,u,u,u,u]
2359 ; AVX512BW-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
2360 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,11]
2361 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
2362 ; AVX512BW-FCP-NEXT: vpor %xmm4, %xmm6, %xmm4
2363 ; AVX512BW-FCP-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2}
2364 ; AVX512BW-FCP-NEXT: movw $9362, %di # imm = 0x2492
2365 ; AVX512BW-FCP-NEXT: kmovd %edi, %k3
2366 ; AVX512BW-FCP-NEXT: vpblendmw %ymm1, %ymm0, %ymm4 {%k3}
2367 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
2368 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
2369 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u]
2370 ; AVX512BW-FCP-NEXT: vpor %xmm8, %xmm9, %xmm8
2371 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm7[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
2372 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,6,12]
2373 ; AVX512BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
2374 ; AVX512BW-FCP-NEXT: vmovdqu8 %xmm9, %xmm8 {%k2}
2375 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15,u,u,u,u,u]
2376 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[u,u,u,u,u]
2377 ; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm4, %xmm4
2378 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
2379 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13]
2380 ; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
2381 ; AVX512BW-FCP-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2}
2382 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
2383 ; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm9
2384 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
2385 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
2386 ; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm6
2387 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7]
2388 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
2389 ; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm10
2390 ; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm9
2391 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
2392 ; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
2393 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
2394 ; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm5, %xmm5
2395 ; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
2396 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
2397 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
2398 ; AVX512BW-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm7
2399 ; AVX512BW-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
2400 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
2401 ; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
2402 ; AVX512BW-FCP-NEXT: vmovdqa %xmm3, (%rsi)
2403 ; AVX512BW-FCP-NEXT: vmovdqa %xmm2, (%rdx)
2404 ; AVX512BW-FCP-NEXT: vmovdqa %xmm8, (%rcx)
2405 ; AVX512BW-FCP-NEXT: vmovdqa %xmm4, (%r8)
2406 ; AVX512BW-FCP-NEXT: vmovdqa %xmm6, (%r9)
2407 ; AVX512BW-FCP-NEXT: vmovdqa %xmm0, (%rax)
2408 ; AVX512BW-FCP-NEXT: vzeroupper
2409 ; AVX512BW-FCP-NEXT: retq
2411 ; AVX512DQ-BW-LABEL: load_i8_stride6_vf16:
2412 ; AVX512DQ-BW: # %bb.0:
2413 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
2414 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm1
2415 ; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm0
2416 ; AVX512DQ-BW-NEXT: movw $18724, %r10w # imm = 0x4924
2417 ; AVX512DQ-BW-NEXT: kmovd %r10d, %k1
2418 ; AVX512DQ-BW-NEXT: vpblendmw %ymm0, %ymm1, %ymm2 {%k1}
2419 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u]
2420 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm2, %xmm4
2421 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u]
2422 ; AVX512DQ-BW-NEXT: vpor %xmm3, %xmm5, %xmm3
2423 ; AVX512DQ-BW-NEXT: vmovdqa 80(%rdi), %xmm5
2424 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[4,10]
2425 ; AVX512DQ-BW-NEXT: vmovdqa 64(%rdi), %xmm7
2426 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
2427 ; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm8, %xmm6
2428 ; AVX512DQ-BW-NEXT: movw $-2048, %di # imm = 0xF800
2429 ; AVX512DQ-BW-NEXT: kmovd %edi, %k2
2430 ; AVX512DQ-BW-NEXT: vmovdqu8 %xmm6, %xmm3 {%k2}
2431 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[u,u,u,u,u]
2432 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[3,9,15],zero,zero,xmm4[1,7,13,u,u,u,u,u]
2433 ; AVX512DQ-BW-NEXT: vpor %xmm2, %xmm4, %xmm2
2434 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,11]
2435 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
2436 ; AVX512DQ-BW-NEXT: vpor %xmm4, %xmm6, %xmm4
2437 ; AVX512DQ-BW-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2}
2438 ; AVX512DQ-BW-NEXT: movw $9362, %di # imm = 0x2492
2439 ; AVX512DQ-BW-NEXT: kmovd %edi, %k3
2440 ; AVX512DQ-BW-NEXT: vpblendmw %ymm1, %ymm0, %ymm4 {%k3}
2441 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm4, %xmm6
2442 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
2443 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u]
2444 ; AVX512DQ-BW-NEXT: vpor %xmm8, %xmm9, %xmm8
2445 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm7[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
2446 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,6,12]
2447 ; AVX512DQ-BW-NEXT: vpor %xmm9, %xmm10, %xmm9
2448 ; AVX512DQ-BW-NEXT: vmovdqu8 %xmm9, %xmm8 {%k2}
2449 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15,u,u,u,u,u]
2450 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[u,u,u,u,u]
2451 ; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm4, %xmm4
2452 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
2453 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13]
2454 ; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm9, %xmm6
2455 ; AVX512DQ-BW-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2}
2456 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
2457 ; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm5, %xmm9
2458 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
2459 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm0, %xmm1
2460 ; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm1, %xmm6
2461 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7]
2462 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm9 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
2463 ; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm7, %xmm10
2464 ; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm0, %xmm9
2465 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
2466 ; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm9, %xmm6
2467 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
2468 ; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm5, %xmm5
2469 ; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm1, %xmm1
2470 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
2471 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm5 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
2472 ; AVX512DQ-BW-NEXT: vpshufb %xmm5, %xmm7, %xmm7
2473 ; AVX512DQ-BW-NEXT: vpshufb %xmm5, %xmm0, %xmm0
2474 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
2475 ; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm0, %xmm0
2476 ; AVX512DQ-BW-NEXT: vmovdqa %xmm3, (%rsi)
2477 ; AVX512DQ-BW-NEXT: vmovdqa %xmm2, (%rdx)
2478 ; AVX512DQ-BW-NEXT: vmovdqa %xmm8, (%rcx)
2479 ; AVX512DQ-BW-NEXT: vmovdqa %xmm4, (%r8)
2480 ; AVX512DQ-BW-NEXT: vmovdqa %xmm6, (%r9)
2481 ; AVX512DQ-BW-NEXT: vmovdqa %xmm0, (%rax)
2482 ; AVX512DQ-BW-NEXT: vzeroupper
2483 ; AVX512DQ-BW-NEXT: retq
2485 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride6_vf16:
2486 ; AVX512DQ-BW-FCP: # %bb.0:
2487 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
2488 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm1
2489 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm0
2490 ; AVX512DQ-BW-FCP-NEXT: movw $18724, %r10w # imm = 0x4924
2491 ; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
2492 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm0, %ymm1, %ymm2 {%k1}
2493 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u]
2494 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm2, %xmm4
2495 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u]
2496 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm3
2497 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 80(%rdi), %xmm5
2498 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[4,10]
2499 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 64(%rdi), %xmm7
2500 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
2501 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
2502 ; AVX512DQ-BW-FCP-NEXT: movw $-2048, %di # imm = 0xF800
2503 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k2
2504 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm6, %xmm3 {%k2}
2505 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[u,u,u,u,u]
2506 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[3,9,15],zero,zero,xmm4[1,7,13,u,u,u,u,u]
2507 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
2508 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,11]
2509 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
2510 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm4, %xmm6, %xmm4
2511 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2}
2512 ; AVX512DQ-BW-FCP-NEXT: movw $9362, %di # imm = 0x2492
2513 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k3
2514 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm1, %ymm0, %ymm4 {%k3}
2515 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm4, %xmm6
2516 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
2517 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u]
2518 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm8, %xmm9, %xmm8
2519 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm7[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
2520 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,6,12]
2521 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
2522 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm9, %xmm8 {%k2}
2523 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15,u,u,u,u,u]
2524 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[u,u,u,u,u]
2525 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm4, %xmm4
2526 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
2527 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13]
2528 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
2529 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2}
2530 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,0,6,12,128,128,128,4,10,128,128,128,2,8,14]
2531 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm9
2532 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
2533 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
2534 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm6
2535 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7]
2536 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [4,10,128,128,128,2,8,14,128,128,0,6,12,128,128,128]
2537 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm10
2538 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm0, %xmm9
2539 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
2540 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
2541 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,1,7,13,128,128,128,5,11,128,128,128,3,9,15]
2542 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm5, %xmm5
2543 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm1, %xmm1
2544 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
2545 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [5,11,128,128,128,3,9,15,128,128,1,7,13,128,128,128]
2546 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm5, %xmm7, %xmm7
2547 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm5, %xmm0, %xmm0
2548 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
2549 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
2550 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm3, (%rsi)
2551 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm2, (%rdx)
2552 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm8, (%rcx)
2553 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm4, (%r8)
2554 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm6, (%r9)
2555 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm0, (%rax)
2556 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
2557 ; AVX512DQ-BW-FCP-NEXT: retq
2558 %wide.vec = load <96 x i8>, ptr %in.vec, align 64
2559 %strided.vec0 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90>
2560 %strided.vec1 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91>
2561 %strided.vec2 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92>
2562 %strided.vec3 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93>
2563 %strided.vec4 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94>
2564 %strided.vec5 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95>
2565 store <16 x i8> %strided.vec0, ptr %out.vec0, align 64
2566 store <16 x i8> %strided.vec1, ptr %out.vec1, align 64
2567 store <16 x i8> %strided.vec2, ptr %out.vec2, align 64
2568 store <16 x i8> %strided.vec3, ptr %out.vec3, align 64
2569 store <16 x i8> %strided.vec4, ptr %out.vec4, align 64
2570 store <16 x i8> %strided.vec5, ptr %out.vec5, align 64
2571 ret void
2572 }
2574 define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
2575 ; SSE-LABEL: load_i8_stride6_vf32:
2576 ; SSE: # %bb.0:
2577 ; SSE-NEXT: subq $264, %rsp # imm = 0x108
2578 ; SSE-NEXT: movdqa 64(%rdi), %xmm7
2579 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2580 ; SSE-NEXT: movdqa 80(%rdi), %xmm9
2581 ; SSE-NEXT: movdqa (%rdi), %xmm12
2582 ; SSE-NEXT: movdqa 16(%rdi), %xmm14
2583 ; SSE-NEXT: movdqa 32(%rdi), %xmm1
2584 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2585 ; SSE-NEXT: movdqa 48(%rdi), %xmm5
2586 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,0,65535,65535,0,65535,65535]
2587 ; SSE-NEXT: movdqa %xmm10, %xmm0
2588 ; SSE-NEXT: pandn %xmm1, %xmm0
2589 ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,0,65535,65535,0,65535,65535,0]
2590 ; SSE-NEXT: movdqa %xmm11, %xmm1
2591 ; SSE-NEXT: pandn %xmm5, %xmm1
2592 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2593 ; SSE-NEXT: movdqa %xmm10, %xmm1
2594 ; SSE-NEXT: pandn %xmm5, %xmm1
2595 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2596 ; SSE-NEXT: movdqa %xmm5, %xmm15
2597 ; SSE-NEXT: pand %xmm10, %xmm15
2598 ; SSE-NEXT: por %xmm0, %xmm15
2599 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
2600 ; SSE-NEXT: movdqa %xmm15, %xmm0
2601 ; SSE-NEXT: pand %xmm1, %xmm0
2602 ; SSE-NEXT: movdqa %xmm1, %xmm3
2603 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7]
2604 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
2605 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
2606 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
2607 ; SSE-NEXT: packuswb %xmm1, %xmm0
2608 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,0,0,65535,65535]
2609 ; SSE-NEXT: movdqa %xmm11, %xmm1
2610 ; SSE-NEXT: pandn %xmm14, %xmm1
2611 ; SSE-NEXT: movdqa %xmm12, %xmm8
2612 ; SSE-NEXT: pand %xmm11, %xmm8
2613 ; SSE-NEXT: por %xmm1, %xmm8
2614 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,2,1,3]
2615 ; SSE-NEXT: pand %xmm3, %xmm1
2616 ; SSE-NEXT: movdqa %xmm3, %xmm6
2617 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
2618 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
2619 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
2620 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
2621 ; SSE-NEXT: packuswb %xmm1, %xmm1
2622 ; SSE-NEXT: pand %xmm2, %xmm1
2623 ; SSE-NEXT: movdqa %xmm2, %xmm3
2624 ; SSE-NEXT: movdqa %xmm2, %xmm5
2625 ; SSE-NEXT: pandn %xmm0, %xmm3
2626 ; SSE-NEXT: por %xmm3, %xmm1
2627 ; SSE-NEXT: movdqa %xmm10, %xmm0
2628 ; SSE-NEXT: pandn %xmm9, %xmm0
2629 ; SSE-NEXT: pand %xmm10, %xmm7
2630 ; SSE-NEXT: por %xmm0, %xmm7
2631 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,1,2,0]
2632 ; SSE-NEXT: pand %xmm6, %xmm0
2633 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
2634 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
2635 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
2636 ; SSE-NEXT: packuswb %xmm0, %xmm0
2637 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
2638 ; SSE-NEXT: movdqa %xmm3, %xmm2
2639 ; SSE-NEXT: pandn %xmm0, %xmm2
2640 ; SSE-NEXT: pand %xmm3, %xmm1
2641 ; SSE-NEXT: por %xmm1, %xmm2
2642 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2643 ; SSE-NEXT: movdqa 128(%rdi), %xmm1
2644 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2645 ; SSE-NEXT: movdqa %xmm10, %xmm0
2646 ; SSE-NEXT: pandn %xmm1, %xmm0
2647 ; SSE-NEXT: movdqa 144(%rdi), %xmm1
2648 ; SSE-NEXT: movdqa %xmm11, %xmm2
2649 ; SSE-NEXT: pandn %xmm1, %xmm2
2650 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2651 ; SSE-NEXT: movdqa %xmm10, %xmm2
2652 ; SSE-NEXT: pandn %xmm1, %xmm2
2653 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2654 ; SSE-NEXT: movdqa %xmm1, %xmm2
2655 ; SSE-NEXT: pand %xmm10, %xmm2
2656 ; SSE-NEXT: por %xmm0, %xmm2
2657 ; SSE-NEXT: movdqa %xmm2, %xmm0
2658 ; SSE-NEXT: pand %xmm6, %xmm0
2659 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[0,3,2,3,4,5,6,7]
2660 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
2661 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
2662 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
2663 ; SSE-NEXT: packuswb %xmm3, %xmm0
2664 ; SSE-NEXT: movdqa %xmm5, %xmm6
2665 ; SSE-NEXT: pandn %xmm0, %xmm6
2666 ; SSE-NEXT: movdqa %xmm10, %xmm1
2667 ; SSE-NEXT: movdqa %xmm10, %xmm0
2668 ; SSE-NEXT: pandn %xmm12, %xmm0
2669 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2670 ; SSE-NEXT: movdqa 112(%rdi), %xmm0
2671 ; SSE-NEXT: movdqa %xmm11, %xmm3
2672 ; SSE-NEXT: pandn %xmm0, %xmm3
2673 ; SSE-NEXT: movdqa 160(%rdi), %xmm5
2674 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2675 ; SSE-NEXT: pand %xmm10, %xmm5
2676 ; SSE-NEXT: movdqa %xmm10, %xmm4
2677 ; SSE-NEXT: pandn %xmm14, %xmm4
2678 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2679 ; SSE-NEXT: pand %xmm10, %xmm12
2680 ; SSE-NEXT: movdqa %xmm11, %xmm4
2681 ; SSE-NEXT: pandn %xmm9, %xmm4
2682 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2683 ; SSE-NEXT: movdqa %xmm9, %xmm11
2684 ; SSE-NEXT: pand %xmm10, %xmm11
2685 ; SSE-NEXT: movdqa %xmm10, %xmm4
2686 ; SSE-NEXT: pandn %xmm0, %xmm4
2687 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2688 ; SSE-NEXT: movdqa 96(%rdi), %xmm13
2689 ; SSE-NEXT: movdqa %xmm13, %xmm4
2690 ; SSE-NEXT: pand %xmm10, %xmm4
2691 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2692 ; SSE-NEXT: movdqa 176(%rdi), %xmm4
2693 ; SSE-NEXT: movdqa %xmm4, %xmm10
2694 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2695 ; SSE-NEXT: pand %xmm1, %xmm10
2696 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2697 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2698 ; SSE-NEXT: movdqa %xmm9, %xmm10
2699 ; SSE-NEXT: pand %xmm1, %xmm9
2700 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2701 ; SSE-NEXT: pand %xmm1, %xmm14
2702 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2703 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2704 ; SSE-NEXT: movdqa %xmm14, %xmm9
2705 ; SSE-NEXT: pand %xmm1, %xmm14
2706 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2707 ; SSE-NEXT: pand %xmm1, %xmm0
2708 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2709 ; SSE-NEXT: movdqa %xmm1, %xmm14
2710 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2711 ; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
2712 ; SSE-NEXT: pandn %xmm13, %xmm1
2713 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2714 ; SSE-NEXT: movdqa %xmm13, %xmm1
2715 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
2716 ; SSE-NEXT: por %xmm3, %xmm1
2717 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,2,1,3]
2718 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
2719 ; SSE-NEXT: pand %xmm0, %xmm3
2720 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
2721 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
2722 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,3,2,1,4,5,6,7]
2723 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
2724 ; SSE-NEXT: packuswb %xmm3, %xmm3
2725 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
2726 ; SSE-NEXT: por %xmm6, %xmm3
2727 ; SSE-NEXT: pandn %xmm4, %xmm14
2728 ; SSE-NEXT: por %xmm14, %xmm5
2729 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[3,1,2,0]
2730 ; SSE-NEXT: pand %xmm0, %xmm4
2731 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,1,2,3,4,5,6,7]
2732 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,0]
2733 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
2734 ; SSE-NEXT: packuswb %xmm4, %xmm4
2735 ; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
2736 ; SSE-NEXT: movdqa %xmm13, %xmm0
2737 ; SSE-NEXT: pandn %xmm4, %xmm0
2738 ; SSE-NEXT: pand %xmm13, %xmm3
2739 ; SSE-NEXT: por %xmm3, %xmm0
2740 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2741 ; SSE-NEXT: pxor %xmm4, %xmm4
2742 ; SSE-NEXT: movdqa %xmm15, %xmm3
2743 ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
2744 ; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3],xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7]
2745 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm15[2,2,3,3]
2746 ; SSE-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm3[0],xmm14[1],xmm3[1],xmm14[2],xmm3[2],xmm14[3],xmm3[3]
2747 ; SSE-NEXT: psrld $16, %xmm3
2748 ; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,0,3]
2749 ; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,7,6,7]
2750 ; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm3[2],xmm15[3],xmm3[3]
2751 ; SSE-NEXT: packuswb %xmm15, %xmm14
2752 ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,0,0,0,65535,65535]
2753 ; SSE-NEXT: movdqa %xmm6, %xmm3
2754 ; SSE-NEXT: pandn %xmm14, %xmm3
2755 ; SSE-NEXT: movdqa %xmm8, %xmm14
2756 ; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm4[8],xmm14[9],xmm4[9],xmm14[10],xmm4[10],xmm14[11],xmm4[11],xmm14[12],xmm4[12],xmm14[13],xmm4[13],xmm14[14],xmm4[14],xmm14[15],xmm4[15]
2757 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[2,1,0,3]
2758 ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm14[1,1,1,1,4,5,6,7]
2759 ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,7,6,7]
2760 ; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,0,65535,0,0,65535,65535]
2761 ; SSE-NEXT: movdqa %xmm15, %xmm0
2762 ; SSE-NEXT: pandn %xmm14, %xmm0
2763 ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
2764 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7]
2765 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3]
2766 ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm8[1,3,2,0,4,5,6,7]
2767 ; SSE-NEXT: pand %xmm15, %xmm14
2768 ; SSE-NEXT: por %xmm0, %xmm14
2769 ; SSE-NEXT: packuswb %xmm14, %xmm14
2770 ; SSE-NEXT: pand %xmm6, %xmm14
2771 ; SSE-NEXT: por %xmm3, %xmm14
2772 ; SSE-NEXT: movdqa %xmm7, %xmm0
2773 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
2774 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
2775 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,65535,65535,0,65535]
2776 ; SSE-NEXT: movdqa %xmm8, %xmm3
2777 ; SSE-NEXT: pandn %xmm0, %xmm3
2778 ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
2779 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[3,1,2,3,4,5,6,7]
2780 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
2781 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,4]
2782 ; SSE-NEXT: pand %xmm8, %xmm0
2783 ; SSE-NEXT: por %xmm3, %xmm0
2784 ; SSE-NEXT: packuswb %xmm0, %xmm0
2785 ; SSE-NEXT: movdqa %xmm13, %xmm3
2786 ; SSE-NEXT: pandn %xmm0, %xmm3
2787 ; SSE-NEXT: pand %xmm13, %xmm14
2788 ; SSE-NEXT: por %xmm14, %xmm3
2789 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2790 ; SSE-NEXT: movdqa %xmm2, %xmm0
2791 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
2792 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
2793 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
2794 ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
2795 ; SSE-NEXT: psrld $16, %xmm0
2796 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
2797 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,7,6,7]
2798 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
2799 ; SSE-NEXT: packuswb %xmm2, %xmm3
2800 ; SSE-NEXT: movdqa %xmm1, %xmm0
2801 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
2802 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
2803 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
2804 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,7]
2805 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
2806 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
2807 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
2808 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,3,2,0,4,5,6,7]
2809 ; SSE-NEXT: pand %xmm15, %xmm1
2810 ; SSE-NEXT: pandn %xmm0, %xmm15
2811 ; SSE-NEXT: por %xmm1, %xmm15
2812 ; SSE-NEXT: packuswb %xmm15, %xmm15
2813 ; SSE-NEXT: pand %xmm6, %xmm15
2814 ; SSE-NEXT: pandn %xmm3, %xmm6
2815 ; SSE-NEXT: por %xmm6, %xmm15
2816 ; SSE-NEXT: movdqa %xmm5, %xmm0
2817 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
2818 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
2819 ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
2820 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[3,1,2,3,4,5,6,7]
2821 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
2822 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4]
2823 ; SSE-NEXT: pand %xmm8, %xmm1
2824 ; SSE-NEXT: pandn %xmm0, %xmm8
2825 ; SSE-NEXT: por %xmm1, %xmm8
2826 ; SSE-NEXT: packuswb %xmm8, %xmm0
2827 ; SSE-NEXT: movdqa %xmm13, %xmm1
2828 ; SSE-NEXT: pandn %xmm0, %xmm1
2829 ; SSE-NEXT: pand %xmm13, %xmm15
2830 ; SSE-NEXT: movdqa %xmm13, %xmm7
2831 ; SSE-NEXT: por %xmm15, %xmm1
2832 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2833 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
2834 ; SSE-NEXT: pand %xmm5, %xmm10
2835 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
2836 ; SSE-NEXT: movdqa %xmm10, %xmm0
2837 ; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,255,255,255,255]
2838 ; SSE-NEXT: pand %xmm15, %xmm0
2839 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7]
2840 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
2841 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7]
2842 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
2843 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
2844 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,5,6]
2845 ; SSE-NEXT: packuswb %xmm1, %xmm2
2846 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
2847 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm12[2,1,2,3,4,5,6,7]
2848 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
2849 ; SSE-NEXT: pand %xmm15, %xmm0
2850 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
2851 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
2852 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,5,5,5,5]
2853 ; SSE-NEXT: packuswb %xmm1, %xmm1
2854 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
2855 ; SSE-NEXT: movdqa %xmm0, %xmm3
2856 ; SSE-NEXT: pandn %xmm1, %xmm3
2857 ; SSE-NEXT: pand %xmm0, %xmm2
2858 ; SSE-NEXT: por %xmm2, %xmm3
2859 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2860 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2861 ; SSE-NEXT: pandn %xmm14, %xmm1
2862 ; SSE-NEXT: por %xmm1, %xmm11
2863 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[0,3,2,3,4,5,6,7]
2864 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
2865 ; SSE-NEXT: pand %xmm15, %xmm1
2866 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
2867 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,2,2,4,5,6,7]
2868 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
2869 ; SSE-NEXT: packuswb %xmm1, %xmm1
2870 ; SSE-NEXT: movdqa %xmm13, %xmm2
2871 ; SSE-NEXT: pandn %xmm1, %xmm2
2872 ; SSE-NEXT: pand %xmm13, %xmm3
2873 ; SSE-NEXT: por %xmm3, %xmm2
2874 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2875 ; SSE-NEXT: pand %xmm5, %xmm9
2876 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
2877 ; SSE-NEXT: movdqa %xmm9, %xmm1
2878 ; SSE-NEXT: pand %xmm15, %xmm1
2879 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,7,6,7]
2880 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
2881 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,3,4,5,6,7]
2882 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
2883 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
2884 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6]
2885 ; SSE-NEXT: packuswb %xmm2, %xmm1
2886 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2887 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
2888 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm13[2,1,2,3,4,5,6,7]
2889 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
2890 ; SSE-NEXT: pand %xmm15, %xmm2
2891 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
2892 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
2893 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
2894 ; SSE-NEXT: packuswb %xmm2, %xmm2
2895 ; SSE-NEXT: movdqa %xmm0, %xmm3
2896 ; SSE-NEXT: pandn %xmm2, %xmm3
2897 ; SSE-NEXT: pand %xmm0, %xmm1
2898 ; SSE-NEXT: por %xmm1, %xmm3
2899 ; SSE-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
2900 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2901 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2902 ; SSE-NEXT: por %xmm1, %xmm8
2903 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[0,3,2,3,4,5,6,7]
2904 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
2905 ; SSE-NEXT: pand %xmm15, %xmm1
2906 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
2907 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,2,2,4,5,6,7]
2908 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
2909 ; SSE-NEXT: packuswb %xmm1, %xmm1
2910 ; SSE-NEXT: movdqa %xmm7, %xmm2
2911 ; SSE-NEXT: pandn %xmm1, %xmm2
2912 ; SSE-NEXT: pand %xmm7, %xmm3
2913 ; SSE-NEXT: por %xmm3, %xmm2
2914 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2915 ; SSE-NEXT: movdqa %xmm10, %xmm1
2916 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
2917 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
2918 ; SSE-NEXT: movdqa %xmm10, %xmm2
2919 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[3,0]
2920 ; SSE-NEXT: movaps %xmm1, %xmm3
2921 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[0,2]
2922 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm10[0,0]
2923 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm10[2,3]
2924 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,7,5,6,7]
2925 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
2926 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
2927 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
2928 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
2929 ; SSE-NEXT: packuswb %xmm1, %xmm2
2930 ; SSE-NEXT: movdqa %xmm12, %xmm1
2931 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
2932 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
2933 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[3,1,2,1,4,5,6,7]
2934 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,0,65535,65535,65535,65535]
2935 ; SSE-NEXT: movdqa %xmm1, %xmm5
2936 ; SSE-NEXT: pandn %xmm3, %xmm5
2937 ; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm4[8],xmm12[9],xmm4[9],xmm12[10],xmm4[10],xmm12[11],xmm4[11],xmm12[12],xmm4[12],xmm12[13],xmm4[13],xmm12[14],xmm4[14],xmm12[15],xmm4[15]
2938 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[0,3,2,1]
2939 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
2940 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,7,7,7]
2941 ; SSE-NEXT: pand %xmm1, %xmm3
2942 ; SSE-NEXT: por %xmm5, %xmm3
2943 ; SSE-NEXT: packuswb %xmm3, %xmm3
2944 ; SSE-NEXT: movdqa %xmm0, %xmm5
2945 ; SSE-NEXT: pandn %xmm3, %xmm5
2946 ; SSE-NEXT: pand %xmm0, %xmm2
2947 ; SSE-NEXT: por %xmm2, %xmm5
2948 ; SSE-NEXT: movdqa %xmm11, %xmm2
2949 ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
2950 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
2951 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,7,5,6,5]
2952 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,65535,0]
2953 ; SSE-NEXT: movdqa %xmm2, %xmm6
2954 ; SSE-NEXT: pandn %xmm3, %xmm6
2955 ; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
2956 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[0,2,0,3]
2957 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
2958 ; SSE-NEXT: pand %xmm2, %xmm3
2959 ; SSE-NEXT: por %xmm6, %xmm3
2960 ; SSE-NEXT: packuswb %xmm3, %xmm3
2961 ; SSE-NEXT: movdqa %xmm7, %xmm6
2962 ; SSE-NEXT: pandn %xmm3, %xmm6
2963 ; SSE-NEXT: pand %xmm7, %xmm5
2964 ; SSE-NEXT: por %xmm5, %xmm6
2965 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2966 ; SSE-NEXT: movdqa %xmm9, %xmm3
2967 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
2968 ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
2969 ; SSE-NEXT: movdqa %xmm9, %xmm5
2970 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[3,0]
2971 ; SSE-NEXT: movaps %xmm3, %xmm6
2972 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,2]
2973 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm9[0,0]
2974 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm9[2,3]
2975 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,7,5,6,7]
2976 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,2]
2977 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2978 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
2979 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
2980 ; SSE-NEXT: packuswb %xmm3, %xmm5
2981 ; SSE-NEXT: movdqa %xmm13, %xmm3
2982 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
2983 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
2984 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,1,4,5,6,7]
2985 ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm4[8],xmm13[9],xmm4[9],xmm13[10],xmm4[10],xmm13[11],xmm4[11],xmm13[12],xmm4[12],xmm13[13],xmm4[13],xmm13[14],xmm4[14],xmm13[15],xmm4[15]
2986 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm13[0,3,2,1]
2987 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
2988 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
2989 ; SSE-NEXT: pand %xmm1, %xmm6
2990 ; SSE-NEXT: pandn %xmm3, %xmm1
2991 ; SSE-NEXT: por %xmm6, %xmm1
2992 ; SSE-NEXT: pand %xmm0, %xmm5
2993 ; SSE-NEXT: packuswb %xmm1, %xmm1
2994 ; SSE-NEXT: pandn %xmm1, %xmm0
2995 ; SSE-NEXT: por %xmm5, %xmm0
2996 ; SSE-NEXT: movdqa %xmm8, %xmm1
2997 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
2998 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
2999 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
3000 ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
3001 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,2,0,3]
3002 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
3003 ; SSE-NEXT: pand %xmm2, %xmm3
3004 ; SSE-NEXT: pandn %xmm1, %xmm2
3005 ; SSE-NEXT: por %xmm3, %xmm2
3006 ; SSE-NEXT: movdqa %xmm7, %xmm13
3007 ; SSE-NEXT: pand %xmm7, %xmm0
3008 ; SSE-NEXT: packuswb %xmm2, %xmm1
3009 ; SSE-NEXT: pandn %xmm1, %xmm13
3010 ; SSE-NEXT: por %xmm0, %xmm13
3011 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
3012 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
3013 ; SSE-NEXT: movdqa %xmm7, %xmm0
3014 ; SSE-NEXT: pand %xmm15, %xmm0
3015 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
3016 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
3017 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
3018 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
3019 ; SSE-NEXT: packuswb %xmm1, %xmm0
3020 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
3021 ; SSE-NEXT: movdqa %xmm2, %xmm1
3022 ; SSE-NEXT: pandn %xmm0, %xmm1
3023 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
3024 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
3025 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[3,1,2,0]
3026 ; SSE-NEXT: pand %xmm15, %xmm0
3027 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
3028 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
3029 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[2,1,0,3,4,5,6,7]
3030 ; SSE-NEXT: packuswb %xmm3, %xmm3
3031 ; SSE-NEXT: pand %xmm2, %xmm3
3032 ; SSE-NEXT: por %xmm1, %xmm3
3033 ; SSE-NEXT: movdqa %xmm14, %xmm11
3034 ; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,0,65535,65535,0,65535,65535,0]
3035 ; SSE-NEXT: pand %xmm12, %xmm11
3036 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
3037 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,2,1,3]
3038 ; SSE-NEXT: pand %xmm15, %xmm0
3039 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
3040 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
3041 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
3042 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
3043 ; SSE-NEXT: packuswb %xmm0, %xmm5
3044 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,0]
3045 ; SSE-NEXT: movdqa %xmm0, %xmm8
3046 ; SSE-NEXT: pandn %xmm5, %xmm8
3047 ; SSE-NEXT: pand %xmm0, %xmm3
3048 ; SSE-NEXT: por %xmm3, %xmm8
3049 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
3050 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
3051 ; SSE-NEXT: movdqa %xmm14, %xmm3
3052 ; SSE-NEXT: pand %xmm15, %xmm3
3053 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,1,2,3]
3054 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,1,2,3,4,5,6,7]
3055 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
3056 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
3057 ; SSE-NEXT: packuswb %xmm5, %xmm3
3058 ; SSE-NEXT: movdqa %xmm2, %xmm5
3059 ; SSE-NEXT: pandn %xmm3, %xmm5
3060 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3061 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3062 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,1,2,0]
3063 ; SSE-NEXT: pand %xmm15, %xmm3
3064 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
3065 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
3066 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm3[2,1,0,3,4,5,6,7]
3067 ; SSE-NEXT: packuswb %xmm6, %xmm6
3068 ; SSE-NEXT: pand %xmm2, %xmm6
3069 ; SSE-NEXT: por %xmm5, %xmm6
3070 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3071 ; SSE-NEXT: pand %xmm12, %xmm3
3072 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
3073 ; SSE-NEXT: por %xmm3, %xmm12
3074 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[0,2,1,3]
3075 ; SSE-NEXT: pand %xmm15, %xmm3
3076 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,1,4,5,6,7]
3077 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
3078 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,3,4,5,6,7]
3079 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
3080 ; SSE-NEXT: packuswb %xmm3, %xmm5
3081 ; SSE-NEXT: movdqa %xmm0, %xmm3
3082 ; SSE-NEXT: pandn %xmm5, %xmm3
3083 ; SSE-NEXT: pand %xmm0, %xmm6
3084 ; SSE-NEXT: por %xmm6, %xmm3
3085 ; SSE-NEXT: movdqa %xmm7, %xmm5
3086 ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
3087 ; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
3088 ; SSE-NEXT: movdqa %xmm7, %xmm6
3089 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm5[0,0]
3090 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[2,3]
3091 ; SSE-NEXT: psrlq $48, %xmm5
3092 ; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3093 ; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
3094 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[3,1,2,3,4,5,6,7]
3095 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,3]
3096 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,5,7]
3097 ; SSE-NEXT: packuswb %xmm6, %xmm5
3098 ; SSE-NEXT: movdqa %xmm2, %xmm6
3099 ; SSE-NEXT: pandn %xmm5, %xmm6
3100 ; SSE-NEXT: movdqa %xmm9, %xmm5
3101 ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
3102 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
3103 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,5,5,5,5]
3104 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,65535]
3105 ; SSE-NEXT: movdqa %xmm5, %xmm10
3106 ; SSE-NEXT: pandn %xmm7, %xmm10
3107 ; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1],xmm9[2],xmm4[2],xmm9[3],xmm4[3],xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
3108 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm9[0,1,2,3,7,5,6,7]
3109 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
3110 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm7[3,1,1,2,4,5,6,7]
3111 ; SSE-NEXT: pand %xmm5, %xmm9
3112 ; SSE-NEXT: por %xmm10, %xmm9
3113 ; SSE-NEXT: packuswb %xmm9, %xmm9
3114 ; SSE-NEXT: pand %xmm2, %xmm9
3115 ; SSE-NEXT: por %xmm6, %xmm9
3116 ; SSE-NEXT: movdqa %xmm11, %xmm6
3117 ; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm4[8],xmm11[9],xmm4[9],xmm11[10],xmm4[10],xmm11[11],xmm4[11],xmm11[12],xmm4[12],xmm11[13],xmm4[13],xmm11[14],xmm4[14],xmm11[15],xmm4[15]
3118 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm11[0,1,2,3,7,5,6,7]
3119 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
3120 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm7[0,1,2,3,5,5,7,4]
3121 ; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,0,0]
3122 ; SSE-NEXT: movdqa %xmm7, %xmm11
3123 ; SSE-NEXT: pandn %xmm10, %xmm11
3124 ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
3125 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,1,1]
3126 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
3127 ; SSE-NEXT: pand %xmm7, %xmm6
3128 ; SSE-NEXT: por %xmm6, %xmm11
3129 ; SSE-NEXT: packuswb %xmm11, %xmm10
3130 ; SSE-NEXT: movdqa %xmm0, %xmm6
3131 ; SSE-NEXT: pandn %xmm10, %xmm6
3132 ; SSE-NEXT: pand %xmm0, %xmm9
3133 ; SSE-NEXT: por %xmm9, %xmm6
3134 ; SSE-NEXT: movdqa %xmm14, %xmm11
3135 ; SSE-NEXT: movdqa %xmm14, %xmm9
3136 ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
3137 ; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
3138 ; SSE-NEXT: movdqa %xmm11, %xmm10
3139 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm9[0,0]
3140 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm9[2,3]
3141 ; SSE-NEXT: psrlq $48, %xmm9
3142 ; SSE-NEXT: psrldq {{.*#+}} xmm10 = xmm10[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3143 ; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
3144 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm11[3,1,2,3,4,5,6,7]
3145 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,0,3]
3146 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,5,7]
3147 ; SSE-NEXT: packuswb %xmm10, %xmm9
3148 ; SSE-NEXT: movdqa %xmm1, %xmm10
3149 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
3150 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
3151 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
3152 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
3153 ; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm1[0,1,2,3,7,5,6,7]
3154 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,2,2,3]
3155 ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[3,1,1,2,4,5,6,7]
3156 ; SSE-NEXT: pand %xmm5, %xmm11
3157 ; SSE-NEXT: pandn %xmm10, %xmm5
3158 ; SSE-NEXT: por %xmm11, %xmm5
3159 ; SSE-NEXT: packuswb %xmm5, %xmm5
3160 ; SSE-NEXT: pand %xmm2, %xmm5
3161 ; SSE-NEXT: pandn %xmm9, %xmm2
3162 ; SSE-NEXT: por %xmm2, %xmm5
3163 ; SSE-NEXT: movdqa %xmm12, %xmm2
3164 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
3165 ; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm4[8],xmm12[9],xmm4[9],xmm12[10],xmm4[10],xmm12[11],xmm4[11],xmm12[12],xmm4[12],xmm12[13],xmm4[13],xmm12[14],xmm4[14],xmm12[15],xmm4[15]
3166 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,1,1]
3167 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
3168 ; SSE-NEXT: pand %xmm7, %xmm2
3169 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,7,5,6,7]
3170 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
3171 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,7,4]
3172 ; SSE-NEXT: pandn %xmm4, %xmm7
3173 ; SSE-NEXT: por %xmm2, %xmm7
3174 ; SSE-NEXT: pand %xmm0, %xmm5
3175 ; SSE-NEXT: packuswb %xmm7, %xmm2
3176 ; SSE-NEXT: pandn %xmm2, %xmm0
3177 ; SSE-NEXT: por %xmm5, %xmm0
3178 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3179 ; SSE-NEXT: movaps %xmm2, 16(%rsi)
3180 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3181 ; SSE-NEXT: movaps %xmm2, (%rsi)
3182 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3183 ; SSE-NEXT: movaps %xmm1, 16(%rdx)
3184 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3185 ; SSE-NEXT: movaps %xmm1, (%rdx)
3186 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3187 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
3188 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3189 ; SSE-NEXT: movaps %xmm1, (%rcx)
3190 ; SSE-NEXT: movdqa %xmm13, 16(%r8)
3191 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3192 ; SSE-NEXT: movaps %xmm1, (%r8)
3193 ; SSE-NEXT: movdqa %xmm3, 16(%r9)
3194 ; SSE-NEXT: movdqa %xmm8, (%r9)
3195 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
3196 ; SSE-NEXT: movdqa %xmm0, 16(%rax)
3197 ; SSE-NEXT: movdqa %xmm6, (%rax)
3198 ; SSE-NEXT: addq $264, %rsp # imm = 0x108
3199 ; SSE-NEXT: retq
3200 ;
3201 ; AVX-LABEL: load_i8_stride6_vf32:
3202 ; AVX: # %bb.0:
3203 ; AVX-NEXT: subq $120, %rsp
3204 ; AVX-NEXT: vmovdqa (%rdi), %xmm9
3205 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm7
3206 ; AVX-NEXT: vmovdqa 32(%rdi), %xmm6
3207 ; AVX-NEXT: vmovdqa 48(%rdi), %xmm8
3208 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm8[2,8,14,u,u,u,u,u,u,u,u,u,u,u,u,u]
3209 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm6[u,u,u,u,u,0,6,12,u,u,u,u,u,u,u,u]
3210 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
3211 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,xmm7[4,10,u,u,u,u,u,u,u,u,u,u,u]
3212 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm9[2,8,14],zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u]
3213 ; AVX-NEXT: vpor %xmm0, %xmm2, %xmm2
3214 ; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [0,0,0,0,0,255,255,255,255,255,255,u,u,u,u,u]
3215 ; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1
3216 ; AVX-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3217 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm8[3,9,15,u,u,u,u,u,u,u,u,u,u,u,u,u]
3218 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,1,7,13,u,u,u,u,u,u,u,u]
3219 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
3220 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm7[5,11,u,u,u,u,u,u,u,u,u,u,u]
3221 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm9[3,9,15],zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u]
3222 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
3223 ; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0
3224 ; AVX-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3225 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm9[4,10],zero,zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u]
3226 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm7[0,6,12,u,u,u,u,u,u,u,u,u,u,u]
3227 ; AVX-NEXT: vpor %xmm0, %xmm1, %xmm1
3228 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm8[u,u,u,u,u,u,u,u,4,10,u,u,u,u,u,u]
3229 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,2,8,14]
3230 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1]
3231 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = [255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
3232 ; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1
3233 ; AVX-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3234 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm9[5,11],zero,zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u]
3235 ; AVX-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3236 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm7[1,7,13,u,u,u,u,u,u,u,u,u,u,u]
3237 ; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
3238 ; AVX-NEXT: vmovdqa %xmm8, %xmm5
3239 ; AVX-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3240 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm8[u,u,u,u,u,u,u,u,5,11,u,u,u,u,u,u]
3241 ; AVX-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3242 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,3,9,15]
3243 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1]
3244 ; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0
3245 ; AVX-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3246 ; AVX-NEXT: vmovq {{.*#+}} xmm8 = [128,128,128,2,8,14,0,0,0,0,0,0,0,0,0,0]
3247 ; AVX-NEXT: vmovdqa 112(%rdi), %xmm0
3248 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3249 ; AVX-NEXT: vpshufb %xmm8, %xmm0, %xmm1
3250 ; AVX-NEXT: vmovq {{.*#+}} xmm2 = [0,6,12,128,128,128,0,0,0,0,0,0,0,0,0,0]
3251 ; AVX-NEXT: vmovdqa 96(%rdi), %xmm13
3252 ; AVX-NEXT: vpshufb %xmm2, %xmm13, %xmm3
3253 ; AVX-NEXT: vpor %xmm1, %xmm3, %xmm1
3254 ; AVX-NEXT: vmovddup {{.*#+}} xmm11 = [0,0,0,128,128,128,4,10,0,0,0,128,128,128,4,10]
3255 ; AVX-NEXT: # xmm11 = mem[0,0]
3256 ; AVX-NEXT: vmovdqa 80(%rdi), %xmm12
3257 ; AVX-NEXT: vpshufb %xmm11, %xmm12, %xmm4
3258 ; AVX-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,2,8,14,128,128,0,0,0,2,8,14,128,128]
3259 ; AVX-NEXT: # xmm3 = mem[0,0]
3260 ; AVX-NEXT: vmovdqa 64(%rdi), %xmm14
3261 ; AVX-NEXT: vpshufb %xmm3, %xmm14, %xmm10
3262 ; AVX-NEXT: vpor %xmm4, %xmm10, %xmm4
3263 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1
3264 ; AVX-NEXT: vmovd {{.*#+}} xmm15 = [0,0,4,10,0,0,0,0,0,0,0,0,0,0,0,0]
3265 ; AVX-NEXT: vpshufb %xmm15, %xmm6, %xmm4
3266 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm0 = [0,6,12,0,0,6,12,0,0,6,12,0,0,6,12,0]
3267 ; AVX-NEXT: vpshufb %xmm0, %xmm5, %xmm10
3268 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm10[0],xmm4[0],xmm10[1],xmm4[1]
3269 ; AVX-NEXT: vpshufb %xmm8, %xmm7, %xmm8
3270 ; AVX-NEXT: vmovdqa %xmm7, %xmm10
3271 ; AVX-NEXT: vpshufb %xmm2, %xmm9, %xmm2
3272 ; AVX-NEXT: vpor %xmm2, %xmm8, %xmm2
3273 ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[3,4,5],xmm2[6,7]
3274 ; AVX-NEXT: vmovaps {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
3275 ; AVX-NEXT: vandnps %ymm1, %ymm4, %ymm1
3276 ; AVX-NEXT: vandps %ymm4, %ymm2, %ymm2
3277 ; AVX-NEXT: vorps %ymm1, %ymm2, %ymm8
3278 ; AVX-NEXT: vmovdqa 128(%rdi), %xmm6
3279 ; AVX-NEXT: vpshufb %xmm15, %xmm6, %xmm1
3280 ; AVX-NEXT: vmovdqa 144(%rdi), %xmm5
3281 ; AVX-NEXT: vpshufb %xmm0, %xmm5, %xmm0
3282 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm15 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
3283 ; AVX-NEXT: vmovdqa 176(%rdi), %xmm4
3284 ; AVX-NEXT: vpshufb %xmm11, %xmm4, %xmm11
3285 ; AVX-NEXT: vmovdqa 160(%rdi), %xmm2
3286 ; AVX-NEXT: vpshufb %xmm3, %xmm2, %xmm3
3287 ; AVX-NEXT: vpor %xmm3, %xmm11, %xmm11
3288 ; AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
3289 ; AVX-NEXT: vpblendvb %xmm3, %xmm15, %xmm11, %xmm15
3290 ; AVX-NEXT: vmovaps {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0]
3291 ; AVX-NEXT: vandps %ymm11, %ymm8, %ymm8
3292 ; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
3293 ; AVX-NEXT: vandnps %ymm15, %ymm11, %ymm15
3294 ; AVX-NEXT: vorps %ymm15, %ymm8, %ymm0
3295 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3296 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
3297 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,xmm9[3,9,15,u,u,u,u,u,u,u,u,u,u]
3298 ; AVX-NEXT: vpshufb {{.*#+}} xmm15 = xmm13[1,7,13],zero,zero,zero,xmm13[u,u,u,u,u,u,u,u,u,u]
3299 ; AVX-NEXT: vpor %xmm0, %xmm15, %xmm1
3300 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm12[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[5,11]
3301 ; AVX-NEXT: vmovdqa %xmm14, %xmm7
3302 ; AVX-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
3303 ; AVX-NEXT: vpor %xmm0, %xmm15, %xmm0
3304 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
3305 ; AVX-NEXT: vmovd {{.*#+}} xmm8 = [0,0,5,11,0,0,0,0,0,0,0,0,0,0,0,0]
3306 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3307 ; AVX-NEXT: vpshufb %xmm8, %xmm1, %xmm15
3308 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,7,13,0,1,7,13,0,1,7,13,0,1,7,13,0]
3309 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
3310 ; AVX-NEXT: vpshufb %xmm1, %xmm14, %xmm14
3311 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
3312 ; AVX-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm10[3,9,15,u,u,u,u,u,u,u,u,u,u]
3313 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
3314 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u]
3315 ; AVX-NEXT: vpor %xmm15, %xmm10, %xmm10
3316 ; AVX-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm14[3,4,5],xmm10[6,7]
3317 ; AVX-NEXT: vmovaps {{.*#+}} ymm14 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
3318 ; AVX-NEXT: vandnps %ymm0, %ymm14, %ymm0
3319 ; AVX-NEXT: vandps %ymm14, %ymm10, %ymm10
3320 ; AVX-NEXT: vorps %ymm0, %ymm10, %ymm0
3321 ; AVX-NEXT: vpshufb %xmm8, %xmm6, %xmm8
3322 ; AVX-NEXT: vpshufb %xmm1, %xmm5, %xmm1
3323 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
3324 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = xmm4[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm4[5,11]
3325 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm2[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
3326 ; AVX-NEXT: vpor %xmm8, %xmm10, %xmm8
3327 ; AVX-NEXT: vpblendvb %xmm3, %xmm1, %xmm8, %xmm1
3328 ; AVX-NEXT: vandps %ymm0, %ymm11, %ymm0
3329 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3330 ; AVX-NEXT: vandnps %ymm1, %ymm11, %ymm1
3331 ; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
3332 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3333 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,xmm9[4,10,u,u,u,u,u,u,u,u,u,u,u]
3334 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm13[2,8,14],zero,zero,xmm13[u,u,u,u,u,u,u,u,u,u,u]
3335 ; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
3336 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [0,0,0,4,10,128,128,128,0,0,0,4,10,128,128,128]
3337 ; AVX-NEXT: # xmm1 = mem[0,0]
3338 ; AVX-NEXT: vpshufb %xmm1, %xmm7, %xmm10
3339 ; AVX-NEXT: vmovddup {{.*#+}} xmm11 = [0,0,0,128,128,0,6,12,0,0,0,128,128,0,6,12]
3340 ; AVX-NEXT: # xmm11 = mem[0,0]
3341 ; AVX-NEXT: vpshufb %xmm11, %xmm12, %xmm14
3342 ; AVX-NEXT: vpor %xmm10, %xmm14, %xmm10
3343 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm10, %ymm0
3344 ; AVX-NEXT: vandnps %ymm0, %ymm3, %ymm0
3345 ; AVX-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm10 # 32-byte Folded Reload
3346 ; AVX-NEXT: vorps %ymm0, %ymm10, %ymm0
3347 ; AVX-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[2,8,14,u,u,u,u,u,u,u,u,u,u,u,u,u]
3348 ; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm6[u,u,u,u,u,0,6,12,u,u,u,u,u,u,u,u]
3349 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm10 = xmm14[0],xmm10[0]
3350 ; AVX-NEXT: vpshufb %xmm1, %xmm2, %xmm1
3351 ; AVX-NEXT: vpshufb %xmm11, %xmm4, %xmm11
3352 ; AVX-NEXT: vpor %xmm1, %xmm11, %xmm1
3353 ; AVX-NEXT: vpblendvb %xmm3, %xmm10, %xmm1, %xmm1
3354 ; AVX-NEXT: vmovaps {{.*#+}} ymm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
3355 ; AVX-NEXT: vandps %ymm0, %ymm10, %ymm0
3356 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3357 ; AVX-NEXT: vandnps %ymm1, %ymm10, %ymm1
3358 ; AVX-NEXT: vorps %ymm1, %ymm0, %ymm11
3359 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,xmm9[5,11,u,u,u,u,u,u,u,u,u,u,u]
3360 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm13[3,9,15],zero,zero,xmm13[u,u,u,u,u,u,u,u,u,u,u]
3361 ; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
3362 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [0,0,0,5,11,128,128,128,0,0,0,5,11,128,128,128]
3363 ; AVX-NEXT: # xmm1 = mem[0,0]
3364 ; AVX-NEXT: vpshufb %xmm1, %xmm7, %xmm14
3365 ; AVX-NEXT: vmovddup {{.*#+}} xmm15 = [0,0,0,128,128,1,7,13,0,0,0,128,128,1,7,13]
3366 ; AVX-NEXT: # xmm15 = mem[0,0]
3367 ; AVX-NEXT: vpshufb %xmm15, %xmm12, %xmm8
3368 ; AVX-NEXT: vpor %xmm14, %xmm8, %xmm8
3369 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm8, %ymm0
3370 ; AVX-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[3,9,15,u,u,u,u,u,u,u,u,u,u,u,u,u]
3371 ; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm6[u,u,u,u,u,1,7,13,u,u,u,u,u,u,u,u]
3372 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm14[0],xmm8[0]
3373 ; AVX-NEXT: vpshufb %xmm1, %xmm2, %xmm1
3374 ; AVX-NEXT: vpshufb %xmm15, %xmm4, %xmm14
3375 ; AVX-NEXT: vpor %xmm1, %xmm14, %xmm1
3376 ; AVX-NEXT: vpblendvb %xmm3, %xmm8, %xmm1, %xmm1
3377 ; AVX-NEXT: vandnps %ymm0, %ymm3, %ymm0
3378 ; AVX-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
3379 ; AVX-NEXT: vorps %ymm0, %ymm3, %ymm0
3380 ; AVX-NEXT: vandps %ymm0, %ymm10, %ymm0
3381 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3382 ; AVX-NEXT: vandnps %ymm1, %ymm10, %ymm1
3383 ; AVX-NEXT: vorps %ymm1, %ymm0, %ymm3
3384 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm13[4,10],zero,zero,zero,xmm13[u,u,u,u,u,u,u,u,u,u,u]
3385 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm9[0,6,12,u,u,u,u,u,u,u,u,u,u,u]
3386 ; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
3387 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [0,0,128,128,128,2,8,14,0,0,128,128,128,2,8,14]
3388 ; AVX-NEXT: # xmm1 = mem[0,0]
3389 ; AVX-NEXT: vpshufb %xmm1, %xmm12, %xmm8
3390 ; AVX-NEXT: vmovddup {{.*#+}} xmm14 = [0,0,0,6,12,128,128,128,0,0,0,6,12,128,128,128]
3391 ; AVX-NEXT: # xmm14 = mem[0,0]
3392 ; AVX-NEXT: vpshufb %xmm14, %xmm7, %xmm15
3393 ; AVX-NEXT: vpor %xmm8, %xmm15, %xmm8
3394 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm8, %ymm0
3395 ; AVX-NEXT: vmovaps {{.*#+}} ymm8 = [0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
3396 ; AVX-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm15 # 32-byte Folded Reload
3397 ; AVX-NEXT: vandps %ymm0, %ymm8, %ymm0
3398 ; AVX-NEXT: vorps %ymm0, %ymm15, %ymm0
3399 ; AVX-NEXT: vpshufb %xmm1, %xmm4, %xmm1
3400 ; AVX-NEXT: vpshufb %xmm14, %xmm2, %xmm14
3401 ; AVX-NEXT: vpor %xmm1, %xmm14, %xmm1
3402 ; AVX-NEXT: vpshufb {{.*#+}} xmm14 = xmm5[u,u,u,u,u,u,u,u,4,10,u,u,u,u,u,u]
3403 ; AVX-NEXT: vpshufb {{.*#+}} xmm15 = xmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,2,8,14]
3404 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm14 = xmm15[1],xmm14[1]
3405 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm14[0,1,2,3,4],xmm1[5,6,7]
3406 ; AVX-NEXT: vandps %ymm0, %ymm10, %ymm0
3407 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
3408 ; AVX-NEXT: vandnps %ymm1, %ymm10, %ymm1
3409 ; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
3410 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm13[5,11],zero,zero,zero,xmm13[u,u,u,u,u,u,u,u,u,u,u]
3411 ; AVX-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[1,7,13,u,u,u,u,u,u,u,u,u,u,u]
3412 ; AVX-NEXT: vpor %xmm1, %xmm9, %xmm1
3413 ; AVX-NEXT: vmovddup {{.*#+}} xmm9 = [0,0,128,128,128,3,9,15,0,0,128,128,128,3,9,15]
3414 ; AVX-NEXT: # xmm9 = mem[0,0]
3415 ; AVX-NEXT: vpshufb %xmm9, %xmm12, %xmm12
3416 ; AVX-NEXT: vmovddup {{.*#+}} xmm13 = [0,0,1,7,13,128,128,128,0,0,1,7,13,128,128,128]
3417 ; AVX-NEXT: # xmm13 = mem[0,0]
3418 ; AVX-NEXT: vpshufb %xmm13, %xmm7, %xmm7
3419 ; AVX-NEXT: vpor %xmm7, %xmm12, %xmm7
3420 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm7, %ymm1
3421 ; AVX-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm7 # 32-byte Folded Reload
3422 ; AVX-NEXT: vandps %ymm1, %ymm8, %ymm1
3423 ; AVX-NEXT: vorps %ymm7, %ymm1, %ymm1
3424 ; AVX-NEXT: vpshufb %xmm9, %xmm4, %xmm4
3425 ; AVX-NEXT: vpshufb %xmm13, %xmm2, %xmm2
3426 ; AVX-NEXT: vpor %xmm4, %xmm2, %xmm2
3427 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,5,11,u,u,u,u,u,u]
3428 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = xmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,3,9,15]
3429 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm5[1],xmm4[1]
3430 ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3,4],xmm2[5,6,7]
3431 ; AVX-NEXT: vandps %ymm1, %ymm10, %ymm1
3432 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
3433 ; AVX-NEXT: vandnps %ymm2, %ymm10, %ymm2
3434 ; AVX-NEXT: vorps %ymm2, %ymm1, %ymm1
3435 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3436 ; AVX-NEXT: vmovaps %ymm2, (%rsi)
3437 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3438 ; AVX-NEXT: vmovaps %ymm2, (%rdx)
3439 ; AVX-NEXT: vmovaps %ymm11, (%rcx)
3440 ; AVX-NEXT: vmovaps %ymm3, (%r8)
3441 ; AVX-NEXT: vmovaps %ymm0, (%r9)
3442 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
3443 ; AVX-NEXT: vmovaps %ymm1, (%rax)
3444 ; AVX-NEXT: addq $120, %rsp
3445 ; AVX-NEXT: vzeroupper
3446 ; AVX-NEXT: retq
3447 ;
3448 ; AVX2-LABEL: load_i8_stride6_vf32:
3449 ; AVX2: # %bb.0:
3450 ; AVX2-NEXT: vmovdqa 160(%rdi), %ymm4
3451 ; AVX2-NEXT: vmovdqa (%rdi), %ymm2
3452 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm3
3453 ; AVX2-NEXT: vmovdqa 64(%rdi), %ymm0
3454 ; AVX2-NEXT: vmovdqa 96(%rdi), %ymm1
3455 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm8 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
3456 ; AVX2-NEXT: vpblendvb %ymm8, %ymm2, %ymm3, %ymm9
3457 ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = xmm9[0,6,12],zero,zero,zero,xmm9[4,10],zero,zero,zero,xmm9[u,u,u,u,u]
3458 ; AVX2-NEXT: vextracti128 $1, %ymm9, %xmm10
3459 ; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm10[2,8,14],zero,zero,xmm10[0,6,12,u,u,u,u,u]
3460 ; AVX2-NEXT: vpor %xmm5, %xmm6, %xmm11
3461 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm7 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
3462 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm0[0,1],ymm1[0,1]
3463 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm1[2,3]
3464 ; AVX2-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm1
3465 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
3466 ; AVX2-NEXT: vpmovsxdq {{.*#+}} xmm7 = [18446744073709551615,16777215]
3467 ; AVX2-NEXT: vpblendvb %ymm7, %ymm11, %ymm0, %ymm0
3468 ; AVX2-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[1,7,13],zero,zero,zero,xmm9[5,11],zero,zero,zero,xmm9[u,u,u,u,u]
3469 ; AVX2-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[3,9,15],zero,zero,xmm10[1,7,13,u,u,u,u,u]
3470 ; AVX2-NEXT: vpor %xmm9, %xmm10, %xmm9
3471 ; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
3472 ; AVX2-NEXT: vpblendvb %ymm7, %ymm9, %ymm1, %ymm1
3473 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm11 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
3474 ; AVX2-NEXT: vpblendvb %ymm11, %ymm3, %ymm2, %ymm9
3475 ; AVX2-NEXT: vextracti128 $1, %ymm9, %xmm10
3476 ; AVX2-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u]
3477 ; AVX2-NEXT: vpshufb {{.*#+}} xmm13 = xmm9[2,8,14],zero,zero,xmm9[0,6,12],zero,zero,zero,xmm9[u,u,u,u,u]
3478 ; AVX2-NEXT: vpor %xmm12, %xmm13, %xmm12
3479 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm13 = [65535,0,0,65535,0,0,65535,0,0,0,65535,0,0,65535,0,0]
3480 ; AVX2-NEXT: vpblendvb %ymm13, %ymm6, %ymm5, %ymm13
3481 ; AVX2-NEXT: vpshufb {{.*#+}} ymm14 = ymm13[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
3482 ; AVX2-NEXT: vpblendvb %ymm7, %ymm12, %ymm14, %ymm12
3483 ; AVX2-NEXT: vmovdqa 128(%rdi), %ymm14
3484 ; AVX2-NEXT: vpblendvb %ymm8, %ymm14, %ymm4, %ymm8
3485 ; AVX2-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u]
3486 ; AVX2-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[3,9,15],zero,zero,xmm9[1,7,13],zero,zero,zero,xmm9[u,u,u,u,u]
3487 ; AVX2-NEXT: vpor %xmm10, %xmm9, %xmm9
3488 ; AVX2-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,0,6,12],zero,zero,zero,xmm8[4,10],zero,zero,zero
3489 ; AVX2-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
3490 ; AVX2-NEXT: vpblendvb %ymm7, %ymm9, %ymm13, %ymm13
3491 ; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm9
3492 ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12]
3493 ; AVX2-NEXT: vpor %xmm7, %xmm10, %xmm7
3494 ; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
3495 ; AVX2-NEXT: vpmovsxwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10
3496 ; AVX2-NEXT: vpblendvb %ymm10, %ymm12, %ymm7, %ymm7
3497 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm12 = [0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0]
3498 ; AVX2-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,1,7,13],zero,zero,zero,xmm8[5,11],zero,zero,zero
3499 ; AVX2-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13]
3500 ; AVX2-NEXT: vpor %xmm8, %xmm9, %xmm8
3501 ; AVX2-NEXT: vpblendvb %ymm12, %ymm4, %ymm14, %ymm9
3502 ; AVX2-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
3503 ; AVX2-NEXT: vpblendvb %ymm10, %ymm13, %ymm8, %ymm8
3504 ; AVX2-NEXT: vpblendvb %ymm11, %ymm4, %ymm14, %ymm4
3505 ; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm11
3506 ; AVX2-NEXT: vpshufb {{.*#+}} xmm13 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14]
3507 ; AVX2-NEXT: vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero
3508 ; AVX2-NEXT: vpor %xmm13, %xmm14, %xmm13
3509 ; AVX2-NEXT: vpblendvb %ymm12, %ymm3, %ymm2, %ymm2
3510 ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
3511 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm12 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
3512 ; AVX2-NEXT: vpblendvb %ymm12, %ymm6, %ymm5, %ymm5
3513 ; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
3514 ; AVX2-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[4,10],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[u,u,u,u,u,u]
3515 ; AVX2-NEXT: vpor %xmm6, %xmm12, %xmm6
3516 ; AVX2-NEXT: vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
3517 ; AVX2-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm12[5,6,7]
3518 ; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm12[4,5,6,7]
3519 ; AVX2-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm12
3520 ; AVX2-NEXT: vpblendvb %ymm10, %ymm6, %ymm12, %ymm6
3521 ; AVX2-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15]
3522 ; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero
3523 ; AVX2-NEXT: vpor %xmm4, %xmm11, %xmm4
3524 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
3525 ; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[5,11],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[u,u,u,u,u,u]
3526 ; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
3527 ; AVX2-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
3528 ; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
3529 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
3530 ; AVX2-NEXT: vextracti128 $1, %ymm9, %xmm3
3531 ; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3532 ; AVX2-NEXT: vpblendvb %ymm10, %ymm2, %ymm4, %ymm2
3533 ; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u],zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10]
3534 ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = xmm9[u,u,u,u,u,u,4,10],zero,zero,zero,xmm9[2,8,14],zero,zero
3535 ; AVX2-NEXT: vpor %xmm4, %xmm5, %xmm4
3536 ; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3537 ; AVX2-NEXT: vpblendw {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3,4,5,6,7],ymm0[8,9,10],ymm4[11,12,13,14,15]
3538 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
3539 ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11]
3540 ; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = xmm9[u,u,u,u,u,u,5,11],zero,zero,zero,xmm9[3,9,15],zero,zero
3541 ; AVX2-NEXT: vpor %xmm3, %xmm4, %xmm3
3542 ; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
3543 ; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
3544 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
3545 ; AVX2-NEXT: vmovdqa %ymm0, (%rsi)
3546 ; AVX2-NEXT: vmovdqa %ymm1, (%rdx)
3547 ; AVX2-NEXT: vmovdqa %ymm7, (%rcx)
3548 ; AVX2-NEXT: vmovdqa %ymm8, (%r8)
3549 ; AVX2-NEXT: vmovdqa %ymm6, (%r9)
3550 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
3551 ; AVX2-NEXT: vmovdqa %ymm2, (%rax)
3552 ; AVX2-NEXT: vzeroupper
3553 ; AVX2-NEXT: retq
3554 ;
3555 ; AVX2-FP-LABEL: load_i8_stride6_vf32:
3556 ; AVX2-FP: # %bb.0:
3557 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm4
3558 ; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm2
3559 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm3
3560 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm0
3561 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm1
3562 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
3563 ; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm2, %ymm3, %ymm9
3564 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = xmm9[0,6,12],zero,zero,zero,xmm9[4,10],zero,zero,zero,xmm9[u,u,u,u,u]
3565 ; AVX2-FP-NEXT: vextracti128 $1, %ymm9, %xmm10
3566 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm10[2,8,14],zero,zero,xmm10[0,6,12,u,u,u,u,u]
3567 ; AVX2-FP-NEXT: vpor %xmm5, %xmm6, %xmm11
3568 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
3569 ; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm0[0,1],ymm1[0,1]
3570 ; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm1[2,3]
3571 ; AVX2-FP-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm1
3572 ; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
3573 ; AVX2-FP-NEXT: vpmovsxdq {{.*#+}} xmm7 = [18446744073709551615,16777215]
3574 ; AVX2-FP-NEXT: vpblendvb %ymm7, %ymm11, %ymm0, %ymm0
3575 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[1,7,13],zero,zero,zero,xmm9[5,11],zero,zero,zero,xmm9[u,u,u,u,u]
3576 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[3,9,15],zero,zero,xmm10[1,7,13,u,u,u,u,u]
3577 ; AVX2-FP-NEXT: vpor %xmm9, %xmm10, %xmm9
3578 ; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
3579 ; AVX2-FP-NEXT: vpblendvb %ymm7, %ymm9, %ymm1, %ymm1
3580 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
3581 ; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm3, %ymm2, %ymm9
3582 ; AVX2-FP-NEXT: vextracti128 $1, %ymm9, %xmm10
3583 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u]
3584 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm13 = xmm9[2,8,14],zero,zero,xmm9[0,6,12],zero,zero,zero,xmm9[u,u,u,u,u]
3585 ; AVX2-FP-NEXT: vpor %xmm12, %xmm13, %xmm12
3586 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm13 = [65535,0,0,65535,0,0,65535,0,0,0,65535,0,0,65535,0,0]
3587 ; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm6, %ymm5, %ymm13
3588 ; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm14 = ymm13[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
3589 ; AVX2-FP-NEXT: vpblendvb %ymm7, %ymm12, %ymm14, %ymm12
3590 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm14
3591 ; AVX2-FP-NEXT: vpblendvb %ymm8, %ymm14, %ymm4, %ymm8
3592 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u]
3593 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[3,9,15],zero,zero,xmm9[1,7,13],zero,zero,zero,xmm9[u,u,u,u,u]
3594 ; AVX2-FP-NEXT: vpor %xmm10, %xmm9, %xmm9
3595 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,0,6,12],zero,zero,zero,xmm8[4,10],zero,zero,zero
3596 ; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
3597 ; AVX2-FP-NEXT: vpblendvb %ymm7, %ymm9, %ymm13, %ymm13
3598 ; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm9
3599 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12]
3600 ; AVX2-FP-NEXT: vpor %xmm7, %xmm10, %xmm7
3601 ; AVX2-FP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
3602 ; AVX2-FP-NEXT: vpmovsxwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10
3603 ; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm12, %ymm7, %ymm7
3604 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0]
3605 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,1,7,13],zero,zero,zero,xmm8[5,11],zero,zero,zero
3606 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13]
3607 ; AVX2-FP-NEXT: vpor %xmm8, %xmm9, %xmm8
3608 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm4, %ymm14, %ymm9
3609 ; AVX2-FP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
3610 ; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm13, %ymm8, %ymm8
3611 ; AVX2-FP-NEXT: vpblendvb %ymm11, %ymm4, %ymm14, %ymm4
3612 ; AVX2-FP-NEXT: vextracti128 $1, %ymm4, %xmm11
3613 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm13 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14]
3614 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero
3615 ; AVX2-FP-NEXT: vpor %xmm13, %xmm14, %xmm13
3616 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm3, %ymm2, %ymm2
3617 ; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
3618 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
3619 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm6, %ymm5, %ymm5
3620 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
3621 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[4,10],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[u,u,u,u,u,u]
3622 ; AVX2-FP-NEXT: vpor %xmm6, %xmm12, %xmm6
3623 ; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
3624 ; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm12[5,6,7]
3625 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm12[4,5,6,7]
3626 ; AVX2-FP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm12
3627 ; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm6, %ymm12, %ymm6
3628 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15]
3629 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero
3630 ; AVX2-FP-NEXT: vpor %xmm4, %xmm11, %xmm4
3631 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
3632 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[5,11],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[u,u,u,u,u,u]
3633 ; AVX2-FP-NEXT: vpor %xmm3, %xmm2, %xmm2
3634 ; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
3635 ; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
3636 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
3637 ; AVX2-FP-NEXT: vextracti128 $1, %ymm9, %xmm3
3638 ; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3639 ; AVX2-FP-NEXT: vpblendvb %ymm10, %ymm2, %ymm4, %ymm2
3640 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u],zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10]
3641 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm5 = xmm9[u,u,u,u,u,u,4,10],zero,zero,zero,xmm9[2,8,14],zero,zero
3642 ; AVX2-FP-NEXT: vpor %xmm4, %xmm5, %xmm4
3643 ; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3644 ; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3,4,5,6,7],ymm0[8,9,10],ymm4[11,12,13,14,15]
3645 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
3646 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11]
3647 ; AVX2-FP-NEXT: vpshufb {{.*#+}} xmm4 = xmm9[u,u,u,u,u,u,5,11],zero,zero,zero,xmm9[3,9,15],zero,zero
3648 ; AVX2-FP-NEXT: vpor %xmm3, %xmm4, %xmm3
3649 ; AVX2-FP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
3650 ; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
3651 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
3652 ; AVX2-FP-NEXT: vmovdqa %ymm0, (%rsi)
3653 ; AVX2-FP-NEXT: vmovdqa %ymm1, (%rdx)
3654 ; AVX2-FP-NEXT: vmovdqa %ymm7, (%rcx)
3655 ; AVX2-FP-NEXT: vmovdqa %ymm8, (%r8)
3656 ; AVX2-FP-NEXT: vmovdqa %ymm6, (%r9)
3657 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3658 ; AVX2-FP-NEXT: vmovdqa %ymm2, (%rax)
3659 ; AVX2-FP-NEXT: vzeroupper
3660 ; AVX2-FP-NEXT: retq
3662 ; AVX2-FCP-LABEL: load_i8_stride6_vf32:
3663 ; AVX2-FCP: # %bb.0:
3664 ; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm4
3665 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm2
3666 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
3667 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
3668 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm1
3669 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
3670 ; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm2, %ymm3, %ymm9
3671 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm9[0,6,12],zero,zero,zero,xmm9[4,10],zero,zero,zero,xmm9[u,u,u,u,u]
3672 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
3673 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm10[2,8,14],zero,zero,xmm10[0,6,12,u,u,u,u,u]
3674 ; AVX2-FCP-NEXT: vpor %xmm5, %xmm6, %xmm11
3675 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
3676 ; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm0[0,1],ymm1[0,1]
3677 ; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm1[2,3]
3678 ; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm1
3679 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
3680 ; AVX2-FCP-NEXT: vpmovsxdq {{.*#+}} xmm7 = [18446744073709551615,16777215]
3681 ; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm11, %ymm0, %ymm0
3682 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[1,7,13],zero,zero,zero,xmm9[5,11],zero,zero,zero,xmm9[u,u,u,u,u]
3683 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[3,9,15],zero,zero,xmm10[1,7,13,u,u,u,u,u]
3684 ; AVX2-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
3685 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
3686 ; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm9, %ymm1, %ymm1
3687 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
3688 ; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm3, %ymm2, %ymm9
3689 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm10
3690 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u]
3691 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm9[2,8,14],zero,zero,xmm9[0,6,12],zero,zero,zero,xmm9[u,u,u,u,u]
3692 ; AVX2-FCP-NEXT: vpor %xmm12, %xmm13, %xmm12
3693 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm13 = [65535,0,0,65535,0,0,65535,0,0,0,65535,0,0,65535,0,0]
3694 ; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm6, %ymm5, %ymm13
3695 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm14 = ymm13[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
3696 ; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm12, %ymm14, %ymm12
3697 ; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm14
3698 ; AVX2-FCP-NEXT: vpblendvb %ymm8, %ymm14, %ymm4, %ymm8
3699 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u]
3700 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[3,9,15],zero,zero,xmm9[1,7,13],zero,zero,zero,xmm9[u,u,u,u,u]
3701 ; AVX2-FCP-NEXT: vpor %xmm10, %xmm9, %xmm9
3702 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,0,6,12],zero,zero,zero,xmm8[4,10],zero,zero,zero
3703 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
3704 ; AVX2-FCP-NEXT: vpblendvb %ymm7, %ymm9, %ymm13, %ymm13
3705 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
3706 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12]
3707 ; AVX2-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
3708 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
3709 ; AVX2-FCP-NEXT: vpmovsxwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10
3710 ; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm12, %ymm7, %ymm7
3711 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0]
3712 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,1,7,13],zero,zero,zero,xmm8[5,11],zero,zero,zero
3713 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13]
3714 ; AVX2-FCP-NEXT: vpor %xmm8, %xmm9, %xmm8
3715 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm4, %ymm14, %ymm9
3716 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
3717 ; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm13, %ymm8, %ymm8
3718 ; AVX2-FCP-NEXT: vpblendvb %ymm11, %ymm4, %ymm14, %ymm4
3719 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm4, %xmm11
3720 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14]
3721 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero
3722 ; AVX2-FCP-NEXT: vpor %xmm13, %xmm14, %xmm13
3723 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm3, %ymm2, %ymm2
3724 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
3725 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
3726 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm6, %ymm5, %ymm5
3727 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
3728 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[4,10],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[u,u,u,u,u,u]
3729 ; AVX2-FCP-NEXT: vpor %xmm6, %xmm12, %xmm6
3730 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
3731 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm12[5,6,7]
3732 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm12[4,5,6,7]
3733 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm12
3734 ; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm6, %ymm12, %ymm6
3735 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15]
3736 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero
3737 ; AVX2-FCP-NEXT: vpor %xmm4, %xmm11, %xmm4
3738 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
3739 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[5,11],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[u,u,u,u,u,u]
3740 ; AVX2-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
3741 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
3742 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
3743 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
3744 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm3
3745 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3746 ; AVX2-FCP-NEXT: vpblendvb %ymm10, %ymm2, %ymm4, %ymm2
3747 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u],zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10]
3748 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm9[u,u,u,u,u,u,4,10],zero,zero,zero,xmm9[2,8,14],zero,zero
3749 ; AVX2-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
3750 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3751 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3,4,5,6,7],ymm0[8,9,10],ymm4[11,12,13,14,15]
3752 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
3753 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11]
3754 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm9[u,u,u,u,u,u,5,11],zero,zero,zero,xmm9[3,9,15],zero,zero
3755 ; AVX2-FCP-NEXT: vpor %xmm3, %xmm4, %xmm3
3756 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
3757 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
3758 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
3759 ; AVX2-FCP-NEXT: vmovdqa %ymm0, (%rsi)
3760 ; AVX2-FCP-NEXT: vmovdqa %ymm1, (%rdx)
3761 ; AVX2-FCP-NEXT: vmovdqa %ymm7, (%rcx)
3762 ; AVX2-FCP-NEXT: vmovdqa %ymm8, (%r8)
3763 ; AVX2-FCP-NEXT: vmovdqa %ymm6, (%r9)
3764 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3765 ; AVX2-FCP-NEXT: vmovdqa %ymm2, (%rax)
3766 ; AVX2-FCP-NEXT: vzeroupper
3767 ; AVX2-FCP-NEXT: retq
3769 ; AVX512-LABEL: load_i8_stride6_vf32:
3770 ; AVX512: # %bb.0:
3771 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
3772 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
3773 ; AVX512-NEXT: vmovdqa64 (%rdi), %ymm17
3774 ; AVX512-NEXT: vmovdqa 32(%rdi), %ymm3
3775 ; AVX512-NEXT: vmovdqa 64(%rdi), %ymm1
3776 ; AVX512-NEXT: vmovdqa 128(%rdi), %ymm6
3777 ; AVX512-NEXT: vmovdqa %ymm0, %ymm7
3778 ; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm17, %ymm7
3779 ; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
3780 ; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm8
3781 ; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm8[2,8,14],zero,zero,xmm8[0,6,12,u,u,u,u,u]
3782 ; AVX512-NEXT: vpor %xmm4, %xmm5, %xmm4
3783 ; AVX512-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[2,3],mem[2,3]
3784 ; AVX512-NEXT: vinserti128 $1, 96(%rdi), %ymm1, %ymm1
3785 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
3786 ; AVX512-NEXT: vmovdqa %ymm9, %ymm10
3787 ; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm1, %ymm10
3788 ; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3789 ; AVX512-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
3790 ; AVX512-NEXT: vpternlogq $248, %ymm16, %ymm4, %ymm11
3791 ; AVX512-NEXT: vmovdqa 160(%rdi), %ymm13
3792 ; AVX512-NEXT: vmovdqa %ymm0, %ymm14
3793 ; AVX512-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm14
3794 ; AVX512-NEXT: vextracti128 $1, %ymm14, %xmm15
3795 ; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[0,6,12],zero,zero,zero,xmm15[4,10]
3796 ; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,4,10],zero,zero,zero,xmm14[2,8,14],zero,zero
3797 ; AVX512-NEXT: vpor %xmm4, %xmm12, %xmm4
3798 ; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3799 ; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0,1,2],ymm4[3,4,5,6,7],ymm11[8,9,10],ymm4[11,12,13,14,15]
3800 ; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm4[4,5,6,7]
3801 ; AVX512-NEXT: vmovdqa64 %ymm2, %ymm18
3802 ; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
3803 ; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[3,9,15],zero,zero,xmm8[1,7,13,u,u,u,u,u]
3804 ; AVX512-NEXT: vpor %xmm7, %xmm8, %xmm7
3805 ; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3806 ; AVX512-NEXT: vpternlogq $248, %ymm16, %ymm7, %ymm8
3807 ; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[1,7,13],zero,zero,zero,xmm15[5,11]
3808 ; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u,u,5,11],zero,zero,zero,xmm14[3,9,15],zero,zero
3809 ; AVX512-NEXT: vpor %xmm7, %xmm10, %xmm7
3810 ; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
3811 ; AVX512-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
3812 ; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
3813 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
3814 ; AVX512-NEXT: vmovdqa %ymm8, %ymm10
3815 ; AVX512-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm10
3816 ; AVX512-NEXT: vextracti128 $1, %ymm10, %xmm11
3817 ; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
3818 ; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
3819 ; AVX512-NEXT: vpor %xmm12, %xmm14, %xmm12
3820 ; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm5, %ymm9
3821 ; AVX512-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
3822 ; AVX512-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
3823 ; AVX512-NEXT: vpternlogq $248, %ymm16, %ymm12, %ymm14
3824 ; AVX512-NEXT: vmovdqa %ymm0, %ymm12
3825 ; AVX512-NEXT: vpternlogq $202, %ymm13, %ymm6, %ymm12
3826 ; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
3827 ; AVX512-NEXT: vextracti128 $1, %ymm12, %xmm2
3828 ; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[0,6,12]
3829 ; AVX512-NEXT: vpor %xmm4, %xmm15, %xmm4
3830 ; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3831 ; AVX512-NEXT: vpmovsxwd {{.*#+}} ymm15 = [4294967295,4294967295,4294967295,4294967295,4294967295,255,0,0]
3832 ; AVX512-NEXT: vpternlogq $184, %ymm14, %ymm15, %ymm4
3833 ; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
3834 ; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
3835 ; AVX512-NEXT: vpor %xmm11, %xmm10, %xmm10
3836 ; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
3837 ; AVX512-NEXT: vpternlogq $248, %ymm16, %ymm10, %ymm9
3838 ; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
3839 ; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[1,7,13]
3840 ; AVX512-NEXT: vpor %xmm2, %xmm10, %xmm2
3841 ; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
3842 ; AVX512-NEXT: vpternlogq $184, %ymm9, %ymm15, %ymm2
3843 ; AVX512-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm8
3844 ; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm6
3845 ; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14]
3846 ; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,2,8,14],zero,zero,xmm8[0,6,12],zero,zero,zero
3847 ; AVX512-NEXT: vpor %xmm9, %xmm10, %xmm9
3848 ; AVX512-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
3849 ; AVX512-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm0
3850 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
3851 ; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
3852 ; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
3853 ; AVX512-NEXT: vpor %xmm10, %xmm11, %xmm10
3854 ; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm1
3855 ; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
3856 ; AVX512-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm5[5,6,7]
3857 ; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
3858 ; AVX512-NEXT: vpternlogq $226, %ymm9, %ymm15, %ymm5
3859 ; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15]
3860 ; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,3,9,15],zero,zero,xmm8[1,7,13],zero,zero,zero
3861 ; AVX512-NEXT: vpor %xmm6, %xmm8, %xmm6
3862 ; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
3863 ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
3864 ; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
3865 ; AVX512-NEXT: vpor %xmm3, %xmm0, %xmm0
3866 ; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
3867 ; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
3868 ; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3869 ; AVX512-NEXT: vpternlogq $226, %ymm6, %ymm15, %ymm0
3870 ; AVX512-NEXT: vmovdqa64 %ymm18, (%rsi)
3871 ; AVX512-NEXT: vmovdqa %ymm7, (%rdx)
3872 ; AVX512-NEXT: vmovdqa %ymm4, (%rcx)
3873 ; AVX512-NEXT: vmovdqa %ymm2, (%r8)
3874 ; AVX512-NEXT: vmovdqa %ymm5, (%r9)
3875 ; AVX512-NEXT: vmovdqa %ymm0, (%rax)
3876 ; AVX512-NEXT: vzeroupper
3877 ; AVX512-NEXT: retq
3879 ; AVX512-FCP-LABEL: load_i8_stride6_vf32:
3880 ; AVX512-FCP: # %bb.0:
3881 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
3882 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
3883 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %ymm17
3884 ; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
3885 ; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm1
3886 ; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm6
3887 ; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm7
3888 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm17, %ymm7
3889 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
3890 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
3891 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm8[2,8,14],zero,zero,xmm8[0,6,12,u,u,u,u,u]
3892 ; AVX512-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
3893 ; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[2,3],mem[2,3]
3894 ; AVX512-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm1, %ymm1
3895 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
3896 ; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm10
3897 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm1, %ymm10
3898 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3899 ; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
3900 ; AVX512-FCP-NEXT: vpternlogq $248, %ymm16, %ymm4, %ymm11
3901 ; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm13
3902 ; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm14
3903 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm14
3904 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
3905 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[0,6,12],zero,zero,zero,xmm15[4,10]
3906 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,4,10],zero,zero,zero,xmm14[2,8,14],zero,zero
3907 ; AVX512-FCP-NEXT: vpor %xmm4, %xmm12, %xmm4
3908 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3909 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0,1,2],ymm4[3,4,5,6,7],ymm11[8,9,10],ymm4[11,12,13,14,15]
3910 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm4[4,5,6,7]
3911 ; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm18
3912 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
3913 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[3,9,15],zero,zero,xmm8[1,7,13,u,u,u,u,u]
3914 ; AVX512-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
3915 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3916 ; AVX512-FCP-NEXT: vpternlogq $248, %ymm16, %ymm7, %ymm8
3917 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[1,7,13],zero,zero,zero,xmm15[5,11]
3918 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u,u,5,11],zero,zero,zero,xmm14[3,9,15],zero,zero
3919 ; AVX512-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
3920 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
3921 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
3922 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
3923 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
3924 ; AVX512-FCP-NEXT: vmovdqa %ymm8, %ymm10
3925 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm10
3926 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
3927 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
3928 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
3929 ; AVX512-FCP-NEXT: vpor %xmm12, %xmm14, %xmm12
3930 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm5, %ymm9
3931 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
3932 ; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
3933 ; AVX512-FCP-NEXT: vpternlogq $248, %ymm16, %ymm12, %ymm14
3934 ; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm12
3935 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm13, %ymm6, %ymm12
3936 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
3937 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm2
3938 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[0,6,12]
3939 ; AVX512-FCP-NEXT: vpor %xmm4, %xmm15, %xmm4
3940 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3941 ; AVX512-FCP-NEXT: vpmovsxwd {{.*#+}} ymm15 = [4294967295,4294967295,4294967295,4294967295,4294967295,255,0,0]
3942 ; AVX512-FCP-NEXT: vpternlogq $184, %ymm14, %ymm15, %ymm4
3943 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
3944 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
3945 ; AVX512-FCP-NEXT: vpor %xmm11, %xmm10, %xmm10
3946 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
3947 ; AVX512-FCP-NEXT: vpternlogq $248, %ymm16, %ymm10, %ymm9
3948 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
3949 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[1,7,13]
3950 ; AVX512-FCP-NEXT: vpor %xmm2, %xmm10, %xmm2
3951 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
3952 ; AVX512-FCP-NEXT: vpternlogq $184, %ymm9, %ymm15, %ymm2
3953 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm8
3954 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm6
3955 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14]
3956 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,2,8,14],zero,zero,xmm8[0,6,12],zero,zero,zero
3957 ; AVX512-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
3958 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
3959 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm0
3960 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
3961 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
3962 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
3963 ; AVX512-FCP-NEXT: vpor %xmm10, %xmm11, %xmm10
3964 ; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm1
3965 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
3966 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm5[5,6,7]
3967 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
3968 ; AVX512-FCP-NEXT: vpternlogq $226, %ymm9, %ymm15, %ymm5
3969 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15]
3970 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,3,9,15],zero,zero,xmm8[1,7,13],zero,zero,zero
3971 ; AVX512-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
3972 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
3973 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
3974 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
3975 ; AVX512-FCP-NEXT: vpor %xmm3, %xmm0, %xmm0
3976 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
3977 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
3978 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3979 ; AVX512-FCP-NEXT: vpternlogq $226, %ymm6, %ymm15, %ymm0
3980 ; AVX512-FCP-NEXT: vmovdqa64 %ymm18, (%rsi)
3981 ; AVX512-FCP-NEXT: vmovdqa %ymm7, (%rdx)
3982 ; AVX512-FCP-NEXT: vmovdqa %ymm4, (%rcx)
3983 ; AVX512-FCP-NEXT: vmovdqa %ymm2, (%r8)
3984 ; AVX512-FCP-NEXT: vmovdqa %ymm5, (%r9)
3985 ; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rax)
3986 ; AVX512-FCP-NEXT: vzeroupper
3987 ; AVX512-FCP-NEXT: retq
3989 ; AVX512DQ-LABEL: load_i8_stride6_vf32:
3990 ; AVX512DQ: # %bb.0:
3991 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
3992 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
3993 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %ymm17
3994 ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm3
3995 ; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm1
3996 ; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm6
3997 ; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm7
3998 ; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm17, %ymm7
3999 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
4000 ; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm8
4001 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm8[2,8,14],zero,zero,xmm8[0,6,12,u,u,u,u,u]
4002 ; AVX512DQ-NEXT: vpor %xmm4, %xmm5, %xmm4
4003 ; AVX512DQ-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[2,3],mem[2,3]
4004 ; AVX512DQ-NEXT: vinserti128 $1, 96(%rdi), %ymm1, %ymm1
4005 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
4006 ; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm10
4007 ; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm1, %ymm10
4008 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4009 ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
4010 ; AVX512DQ-NEXT: vpternlogq $248, %ymm16, %ymm4, %ymm11
4011 ; AVX512DQ-NEXT: vmovdqa 160(%rdi), %ymm13
4012 ; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm14
4013 ; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm14
4014 ; AVX512DQ-NEXT: vextracti128 $1, %ymm14, %xmm15
4015 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[0,6,12],zero,zero,zero,xmm15[4,10]
4016 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,4,10],zero,zero,zero,xmm14[2,8,14],zero,zero
4017 ; AVX512DQ-NEXT: vpor %xmm4, %xmm12, %xmm4
4018 ; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
4019 ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0,1,2],ymm4[3,4,5,6,7],ymm11[8,9,10],ymm4[11,12,13,14,15]
4020 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm4[4,5,6,7]
4021 ; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm18
4022 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
4023 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[3,9,15],zero,zero,xmm8[1,7,13,u,u,u,u,u]
4024 ; AVX512DQ-NEXT: vpor %xmm7, %xmm8, %xmm7
4025 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4026 ; AVX512DQ-NEXT: vpternlogq $248, %ymm16, %ymm7, %ymm8
4027 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[1,7,13],zero,zero,zero,xmm15[5,11]
4028 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u,u,5,11],zero,zero,zero,xmm14[3,9,15],zero,zero
4029 ; AVX512DQ-NEXT: vpor %xmm7, %xmm10, %xmm7
4030 ; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
4031 ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
4032 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
4033 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
4034 ; AVX512DQ-NEXT: vmovdqa %ymm8, %ymm10
4035 ; AVX512DQ-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm10
4036 ; AVX512DQ-NEXT: vextracti128 $1, %ymm10, %xmm11
4037 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
4038 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
4039 ; AVX512DQ-NEXT: vpor %xmm12, %xmm14, %xmm12
4040 ; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm5, %ymm9
4041 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
4042 ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
4043 ; AVX512DQ-NEXT: vpternlogq $248, %ymm16, %ymm12, %ymm14
4044 ; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm12
4045 ; AVX512DQ-NEXT: vpternlogq $202, %ymm13, %ymm6, %ymm12
4046 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
4047 ; AVX512DQ-NEXT: vextracti128 $1, %ymm12, %xmm2
4048 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[0,6,12]
4049 ; AVX512DQ-NEXT: vpor %xmm4, %xmm15, %xmm4
4050 ; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
4051 ; AVX512DQ-NEXT: vpmovsxwd {{.*#+}} ymm15 = [4294967295,4294967295,4294967295,4294967295,4294967295,255,0,0]
4052 ; AVX512DQ-NEXT: vpternlogq $184, %ymm14, %ymm15, %ymm4
4053 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
4054 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
4055 ; AVX512DQ-NEXT: vpor %xmm11, %xmm10, %xmm10
4056 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
4057 ; AVX512DQ-NEXT: vpternlogq $248, %ymm16, %ymm10, %ymm9
4058 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
4059 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[1,7,13]
4060 ; AVX512DQ-NEXT: vpor %xmm2, %xmm10, %xmm2
4061 ; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
4062 ; AVX512DQ-NEXT: vpternlogq $184, %ymm9, %ymm15, %ymm2
4063 ; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm8
4064 ; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm6
4065 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14]
4066 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,2,8,14],zero,zero,xmm8[0,6,12],zero,zero,zero
4067 ; AVX512DQ-NEXT: vpor %xmm9, %xmm10, %xmm9
4068 ; AVX512DQ-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
4069 ; AVX512DQ-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm0
4070 ; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
4071 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
4072 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
4073 ; AVX512DQ-NEXT: vpor %xmm10, %xmm11, %xmm10
4074 ; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm1
4075 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
4076 ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm5[5,6,7]
4077 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
4078 ; AVX512DQ-NEXT: vpternlogq $226, %ymm9, %ymm15, %ymm5
4079 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15]
4080 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,3,9,15],zero,zero,xmm8[1,7,13],zero,zero,zero
4081 ; AVX512DQ-NEXT: vpor %xmm6, %xmm8, %xmm6
4082 ; AVX512DQ-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
4083 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
4084 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
4085 ; AVX512DQ-NEXT: vpor %xmm3, %xmm0, %xmm0
4086 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
4087 ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
4088 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4089 ; AVX512DQ-NEXT: vpternlogq $226, %ymm6, %ymm15, %ymm0
4090 ; AVX512DQ-NEXT: vmovdqa64 %ymm18, (%rsi)
4091 ; AVX512DQ-NEXT: vmovdqa %ymm7, (%rdx)
4092 ; AVX512DQ-NEXT: vmovdqa %ymm4, (%rcx)
4093 ; AVX512DQ-NEXT: vmovdqa %ymm2, (%r8)
4094 ; AVX512DQ-NEXT: vmovdqa %ymm5, (%r9)
4095 ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rax)
4096 ; AVX512DQ-NEXT: vzeroupper
4097 ; AVX512DQ-NEXT: retq
4099 ; AVX512DQ-FCP-LABEL: load_i8_stride6_vf32:
4100 ; AVX512DQ-FCP: # %bb.0:
4101 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
4102 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
4103 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %ymm17
4104 ; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
4105 ; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm1
4106 ; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm6
4107 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm7
4108 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm17, %ymm7
4109 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
4110 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
4111 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm8[2,8,14],zero,zero,xmm8[0,6,12,u,u,u,u,u]
4112 ; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm5, %xmm4
4113 ; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[2,3],mem[2,3]
4114 ; AVX512DQ-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm1, %ymm1
4115 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
4116 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm10
4117 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm1, %ymm10
4118 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4119 ; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
4120 ; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm16, %ymm4, %ymm11
4121 ; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm13
4122 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm14
4123 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm14
4124 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
4125 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[0,6,12],zero,zero,zero,xmm15[4,10]
4126 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,4,10],zero,zero,zero,xmm14[2,8,14],zero,zero
4127 ; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm12, %xmm4
4128 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
4129 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0,1,2],ymm4[3,4,5,6,7],ymm11[8,9,10],ymm4[11,12,13,14,15]
4130 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm4[4,5,6,7]
4131 ; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm18
4132 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
4133 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[3,9,15],zero,zero,xmm8[1,7,13,u,u,u,u,u]
4134 ; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
4135 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4136 ; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm16, %ymm7, %ymm8
4137 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[1,7,13],zero,zero,zero,xmm15[5,11]
4138 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u,u,5,11],zero,zero,zero,xmm14[3,9,15],zero,zero
4139 ; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm10, %xmm7
4140 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
4141 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
4142 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
4143 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
4144 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, %ymm10
4145 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm10
4146 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
4147 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
4148 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
4149 ; AVX512DQ-FCP-NEXT: vpor %xmm12, %xmm14, %xmm12
4150 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm5, %ymm9
4151 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
4152 ; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
4153 ; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm16, %ymm12, %ymm14
4154 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm12
4155 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm13, %ymm6, %ymm12
4156 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
4157 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm2
4158 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[0,6,12]
4159 ; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm15, %xmm4
4160 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
4161 ; AVX512DQ-FCP-NEXT: vpmovsxwd {{.*#+}} ymm15 = [4294967295,4294967295,4294967295,4294967295,4294967295,255,0,0]
4162 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm14, %ymm15, %ymm4
4163 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
4164 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
4165 ; AVX512DQ-FCP-NEXT: vpor %xmm11, %xmm10, %xmm10
4166 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
4167 ; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm16, %ymm10, %ymm9
4168 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
4169 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[1,7,13]
4170 ; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm10, %xmm2
4171 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
4172 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm9, %ymm15, %ymm2
4173 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm8
4174 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm6
4175 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14]
4176 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,2,8,14],zero,zero,xmm8[0,6,12],zero,zero,zero
4177 ; AVX512DQ-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
4178 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
4179 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm0
4180 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm3
4181 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
4182 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
4183 ; AVX512DQ-FCP-NEXT: vpor %xmm10, %xmm11, %xmm10
4184 ; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm1
4185 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
4186 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm5[5,6,7]
4187 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
4188 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm9, %ymm15, %ymm5
4189 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15]
4190 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,3,9,15],zero,zero,xmm8[1,7,13],zero,zero,zero
4191 ; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm8, %xmm6
4192 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
4193 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
4194 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
4195 ; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm0, %xmm0
4196 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
4197 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
4198 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4199 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm6, %ymm15, %ymm0
4200 ; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, (%rsi)
4201 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, (%rdx)
4202 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, (%rcx)
4203 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%r8)
4204 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, (%r9)
4205 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rax)
4206 ; AVX512DQ-FCP-NEXT: vzeroupper
4207 ; AVX512DQ-FCP-NEXT: retq
4209 ; AVX512BW-LABEL: load_i8_stride6_vf32:
4210 ; AVX512BW: # %bb.0:
4211 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
4212 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm4
4213 ; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm0
4214 ; AVX512BW-NEXT: vmovdqa 64(%rdi), %ymm3
4215 ; AVX512BW-NEXT: vmovdqa 128(%rdi), %ymm2
4216 ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
4217 ; AVX512BW-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm8
4218 ; AVX512BW-NEXT: movw $-28124, %r10w # imm = 0x9224
4219 ; AVX512BW-NEXT: kmovd %r10d, %k2
4220 ; AVX512BW-NEXT: vpblendmw %ymm1, %ymm8, %ymm6 {%k2}
4221 ; AVX512BW-NEXT: movw $18724, %r10w # imm = 0x4924
4222 ; AVX512BW-NEXT: kmovd %r10d, %k1
4223 ; AVX512BW-NEXT: vpblendmw %ymm0, %ymm4, %ymm7 {%k1}
4224 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
4225 ; AVX512BW-NEXT: vextracti128 $1, %ymm7, %xmm9
4226 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u]
4227 ; AVX512BW-NEXT: vpor %xmm3, %xmm5, %xmm5
4228 ; AVX512BW-NEXT: movl $4192256, %r10d # imm = 0x3FF800
4229 ; AVX512BW-NEXT: kmovd %r10d, %k3
4230 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm5 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
4231 ; AVX512BW-NEXT: vmovdqa 160(%rdi), %ymm3
4232 ; AVX512BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm10 {%k1}
4233 ; AVX512BW-NEXT: vextracti128 $1, %ymm10, %xmm11
4234 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[4,10]
4235 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero
4236 ; AVX512BW-NEXT: vpor %xmm12, %xmm13, %xmm12
4237 ; AVX512BW-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
4238 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm12 = ymm5[0,1,2],ymm12[3,4,5,6,7],ymm5[8,9,10],ymm12[11,12,13,14,15]
4239 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7]
4240 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
4241 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u]
4242 ; AVX512BW-NEXT: vpor %xmm7, %xmm9, %xmm7
4243 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
4244 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[5,11]
4245 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero
4246 ; AVX512BW-NEXT: vpor %xmm6, %xmm9, %xmm6
4247 ; AVX512BW-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
4248 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
4249 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
4250 ; AVX512BW-NEXT: vpblendmw %ymm8, %ymm1, %ymm9 {%k2}
4251 ; AVX512BW-NEXT: movw $9362, %di # imm = 0x2492
4252 ; AVX512BW-NEXT: kmovd %edi, %k3
4253 ; AVX512BW-NEXT: vpblendmw %ymm4, %ymm0, %ymm10 {%k3}
4254 ; AVX512BW-NEXT: vextracti128 $1, %ymm10, %xmm11
4255 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
4256 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
4257 ; AVX512BW-NEXT: vpor %xmm7, %xmm12, %xmm7
4258 ; AVX512BW-NEXT: movl $2095104, %edi # imm = 0x1FF800
4259 ; AVX512BW-NEXT: kmovd %edi, %k4
4260 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
4261 ; AVX512BW-NEXT: vpblendmw %ymm3, %ymm2, %ymm12 {%k1}
4262 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
4263 ; AVX512BW-NEXT: vextracti128 $1, %ymm12, %xmm14
4264 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12]
4265 ; AVX512BW-NEXT: vpor %xmm13, %xmm15, %xmm13
4266 ; AVX512BW-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
4267 ; AVX512BW-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
4268 ; AVX512BW-NEXT: kmovd %edi, %k2
4269 ; AVX512BW-NEXT: vmovdqu8 %ymm13, %ymm7 {%k2}
4270 ; AVX512BW-NEXT: movw $9289, %di # imm = 0x2449
4271 ; AVX512BW-NEXT: kmovd %edi, %k5
4272 ; AVX512BW-NEXT: vmovdqu16 %ymm8, %ymm1 {%k5}
4273 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
4274 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
4275 ; AVX512BW-NEXT: vpor %xmm8, %xmm10, %xmm8
4276 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
4277 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
4278 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13]
4279 ; AVX512BW-NEXT: vpor %xmm9, %xmm10, %xmm9
4280 ; AVX512BW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
4281 ; AVX512BW-NEXT: vmovdqu8 %ymm9, %ymm8 {%k2}
4282 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
4283 ; AVX512BW-NEXT: vmovdqu16 %ymm4, %ymm0 {%k1}
4284 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm4
4285 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
4286 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
4287 ; AVX512BW-NEXT: vpor %xmm10, %xmm11, %xmm10
4288 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm9[5,6,7]
4289 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
4290 ; AVX512BW-NEXT: vmovdqu16 %ymm2, %ymm3 {%k3}
4291 ; AVX512BW-NEXT: vextracti128 $1, %ymm3, %xmm2
4292 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[2,8,14]
4293 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero
4294 ; AVX512BW-NEXT: vpor %xmm10, %xmm11, %xmm10
4295 ; AVX512BW-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
4296 ; AVX512BW-NEXT: vmovdqu8 %ymm10, %ymm9 {%k2}
4297 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
4298 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
4299 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
4300 ; AVX512BW-NEXT: vpor %xmm4, %xmm0, %xmm0
4301 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
4302 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4303 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[3,9,15]
4304 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero
4305 ; AVX512BW-NEXT: vpor %xmm1, %xmm2, %xmm1
4306 ; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4307 ; AVX512BW-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2}
4308 ; AVX512BW-NEXT: vmovdqa %ymm5, (%rsi)
4309 ; AVX512BW-NEXT: vmovdqa %ymm6, (%rdx)
4310 ; AVX512BW-NEXT: vmovdqa %ymm7, (%rcx)
4311 ; AVX512BW-NEXT: vmovdqa %ymm8, (%r8)
4312 ; AVX512BW-NEXT: vmovdqa %ymm9, (%r9)
4313 ; AVX512BW-NEXT: vmovdqa %ymm0, (%rax)
4314 ; AVX512BW-NEXT: vzeroupper
4315 ; AVX512BW-NEXT: retq
4317 ; AVX512BW-FCP-LABEL: load_i8_stride6_vf32:
4318 ; AVX512BW-FCP: # %bb.0:
4319 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
4320 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
4321 ; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm0
4322 ; AVX512BW-FCP-NEXT: vmovdqa 64(%rdi), %ymm3
4323 ; AVX512BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
4324 ; AVX512BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
4325 ; AVX512BW-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm8
4326 ; AVX512BW-FCP-NEXT: movw $-28124, %r10w # imm = 0x9224
4327 ; AVX512BW-FCP-NEXT: kmovd %r10d, %k2
4328 ; AVX512BW-FCP-NEXT: vpblendmw %ymm1, %ymm8, %ymm6 {%k2}
4329 ; AVX512BW-FCP-NEXT: movw $18724, %r10w # imm = 0x4924
4330 ; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
4331 ; AVX512BW-FCP-NEXT: vpblendmw %ymm0, %ymm4, %ymm7 {%k1}
4332 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
4333 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm7, %xmm9
4334 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u]
4335 ; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm5
4336 ; AVX512BW-FCP-NEXT: movl $4192256, %r10d # imm = 0x3FF800
4337 ; AVX512BW-FCP-NEXT: kmovd %r10d, %k3
4338 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
4339 ; AVX512BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm3
4340 ; AVX512BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm10 {%k1}
4341 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
4342 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[4,10]
4343 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero
4344 ; AVX512BW-FCP-NEXT: vpor %xmm12, %xmm13, %xmm12
4345 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
4346 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm5[0,1,2],ymm12[3,4,5,6,7],ymm5[8,9,10],ymm12[11,12,13,14,15]
4347 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7]
4348 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
4349 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u]
4350 ; AVX512BW-FCP-NEXT: vpor %xmm7, %xmm9, %xmm7
4351 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
4352 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[5,11]
4353 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero
4354 ; AVX512BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
4355 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
4356 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
4357 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
4358 ; AVX512BW-FCP-NEXT: vpblendmw %ymm8, %ymm1, %ymm9 {%k2}
4359 ; AVX512BW-FCP-NEXT: movw $9362, %di # imm = 0x2492
4360 ; AVX512BW-FCP-NEXT: kmovd %edi, %k3
4361 ; AVX512BW-FCP-NEXT: vpblendmw %ymm4, %ymm0, %ymm10 {%k3}
4362 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
4363 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
4364 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
4365 ; AVX512BW-FCP-NEXT: vpor %xmm7, %xmm12, %xmm7
4366 ; AVX512BW-FCP-NEXT: movl $2095104, %edi # imm = 0x1FF800
4367 ; AVX512BW-FCP-NEXT: kmovd %edi, %k4
4368 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
4369 ; AVX512BW-FCP-NEXT: vpblendmw %ymm3, %ymm2, %ymm12 {%k1}
4370 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
4371 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm12, %xmm14
4372 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12]
4373 ; AVX512BW-FCP-NEXT: vpor %xmm13, %xmm15, %xmm13
4374 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
4375 ; AVX512BW-FCP-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
4376 ; AVX512BW-FCP-NEXT: kmovd %edi, %k2
4377 ; AVX512BW-FCP-NEXT: vmovdqu8 %ymm13, %ymm7 {%k2}
4378 ; AVX512BW-FCP-NEXT: movw $9289, %di # imm = 0x2449
4379 ; AVX512BW-FCP-NEXT: kmovd %edi, %k5
4380 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm8, %ymm1 {%k5}
4381 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
4382 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
4383 ; AVX512BW-FCP-NEXT: vpor %xmm8, %xmm10, %xmm8
4384 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
4385 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
4386 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13]
4387 ; AVX512BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
4388 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
4389 ; AVX512BW-FCP-NEXT: vmovdqu8 %ymm9, %ymm8 {%k2}
4390 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
4391 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm4, %ymm0 {%k1}
4392 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm4
4393 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
4394 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
4395 ; AVX512BW-FCP-NEXT: vpor %xmm10, %xmm11, %xmm10
4396 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm9[5,6,7]
4397 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
4398 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm2, %ymm3 {%k3}
4399 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm3, %xmm2
4400 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[2,8,14]
4401 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero
4402 ; AVX512BW-FCP-NEXT: vpor %xmm10, %xmm11, %xmm10
4403 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
4404 ; AVX512BW-FCP-NEXT: vmovdqu8 %ymm10, %ymm9 {%k2}
4405 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
4406 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
4407 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
4408 ; AVX512BW-FCP-NEXT: vpor %xmm4, %xmm0, %xmm0
4409 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
4410 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4411 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[3,9,15]
4412 ; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero
4413 ; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
4414 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4415 ; AVX512BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2}
4416 ; AVX512BW-FCP-NEXT: vmovdqa %ymm5, (%rsi)
4417 ; AVX512BW-FCP-NEXT: vmovdqa %ymm6, (%rdx)
4418 ; AVX512BW-FCP-NEXT: vmovdqa %ymm7, (%rcx)
4419 ; AVX512BW-FCP-NEXT: vmovdqa %ymm8, (%r8)
4420 ; AVX512BW-FCP-NEXT: vmovdqa %ymm9, (%r9)
4421 ; AVX512BW-FCP-NEXT: vmovdqa %ymm0, (%rax)
4422 ; AVX512BW-FCP-NEXT: vzeroupper
4423 ; AVX512BW-FCP-NEXT: retq
4424 ;
4425 ; AVX512DQ-BW-LABEL: load_i8_stride6_vf32:
4426 ; AVX512DQ-BW: # %bb.0:
4427 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
4428 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm4
4429 ; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm0
4430 ; AVX512DQ-BW-NEXT: vmovdqa 64(%rdi), %ymm3
4431 ; AVX512DQ-BW-NEXT: vmovdqa 128(%rdi), %ymm2
4432 ; AVX512DQ-BW-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
4433 ; AVX512DQ-BW-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm8
4434 ; AVX512DQ-BW-NEXT: movw $-28124, %r10w # imm = 0x9224
4435 ; AVX512DQ-BW-NEXT: kmovd %r10d, %k2
4436 ; AVX512DQ-BW-NEXT: vpblendmw %ymm1, %ymm8, %ymm6 {%k2}
4437 ; AVX512DQ-BW-NEXT: movw $18724, %r10w # imm = 0x4924
4438 ; AVX512DQ-BW-NEXT: kmovd %r10d, %k1
4439 ; AVX512DQ-BW-NEXT: vpblendmw %ymm0, %ymm4, %ymm7 {%k1}
4440 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
4441 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm7, %xmm9
4442 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u]
4443 ; AVX512DQ-BW-NEXT: vpor %xmm3, %xmm5, %xmm5
4444 ; AVX512DQ-BW-NEXT: movl $4192256, %r10d # imm = 0x3FF800
4445 ; AVX512DQ-BW-NEXT: kmovd %r10d, %k3
4446 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm5 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
4447 ; AVX512DQ-BW-NEXT: vmovdqa 160(%rdi), %ymm3
4448 ; AVX512DQ-BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm10 {%k1}
4449 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm10, %xmm11
4450 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[4,10]
4451 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero
4452 ; AVX512DQ-BW-NEXT: vpor %xmm12, %xmm13, %xmm12
4453 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
4454 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm12 = ymm5[0,1,2],ymm12[3,4,5,6,7],ymm5[8,9,10],ymm12[11,12,13,14,15]
4455 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7]
4456 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
4457 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u]
4458 ; AVX512DQ-BW-NEXT: vpor %xmm7, %xmm9, %xmm7
4459 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
4460 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[5,11]
4461 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero
4462 ; AVX512DQ-BW-NEXT: vpor %xmm6, %xmm9, %xmm6
4463 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
4464 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
4465 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
4466 ; AVX512DQ-BW-NEXT: vpblendmw %ymm8, %ymm1, %ymm9 {%k2}
4467 ; AVX512DQ-BW-NEXT: movw $9362, %di # imm = 0x2492
4468 ; AVX512DQ-BW-NEXT: kmovd %edi, %k3
4469 ; AVX512DQ-BW-NEXT: vpblendmw %ymm4, %ymm0, %ymm10 {%k3}
4470 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm10, %xmm11
4471 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
4472 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
4473 ; AVX512DQ-BW-NEXT: vpor %xmm7, %xmm12, %xmm7
4474 ; AVX512DQ-BW-NEXT: movl $2095104, %edi # imm = 0x1FF800
4475 ; AVX512DQ-BW-NEXT: kmovd %edi, %k4
4476 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm7 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
4477 ; AVX512DQ-BW-NEXT: vpblendmw %ymm3, %ymm2, %ymm12 {%k1}
4478 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
4479 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm12, %xmm14
4480 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12]
4481 ; AVX512DQ-BW-NEXT: vpor %xmm13, %xmm15, %xmm13
4482 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
4483 ; AVX512DQ-BW-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
4484 ; AVX512DQ-BW-NEXT: kmovd %edi, %k2
4485 ; AVX512DQ-BW-NEXT: vmovdqu8 %ymm13, %ymm7 {%k2}
4486 ; AVX512DQ-BW-NEXT: movw $9289, %di # imm = 0x2449
4487 ; AVX512DQ-BW-NEXT: kmovd %edi, %k5
4488 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm8, %ymm1 {%k5}
4489 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
4490 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
4491 ; AVX512DQ-BW-NEXT: vpor %xmm8, %xmm10, %xmm8
4492 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm8 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
4493 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
4494 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13]
4495 ; AVX512DQ-BW-NEXT: vpor %xmm9, %xmm10, %xmm9
4496 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
4497 ; AVX512DQ-BW-NEXT: vmovdqu8 %ymm9, %ymm8 {%k2}
4498 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
4499 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm4, %ymm0 {%k1}
4500 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm0, %xmm4
4501 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
4502 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
4503 ; AVX512DQ-BW-NEXT: vpor %xmm10, %xmm11, %xmm10
4504 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm9[5,6,7]
4505 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
4506 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm2, %ymm3 {%k3}
4507 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm3, %xmm2
4508 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[2,8,14]
4509 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero
4510 ; AVX512DQ-BW-NEXT: vpor %xmm10, %xmm11, %xmm10
4511 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
4512 ; AVX512DQ-BW-NEXT: vmovdqu8 %ymm10, %ymm9 {%k2}
4513 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
4514 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
4515 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
4516 ; AVX512DQ-BW-NEXT: vpor %xmm4, %xmm0, %xmm0
4517 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
4518 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4519 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[3,9,15]
4520 ; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero
4521 ; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm2, %xmm1
4522 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4523 ; AVX512DQ-BW-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2}
4524 ; AVX512DQ-BW-NEXT: vmovdqa %ymm5, (%rsi)
4525 ; AVX512DQ-BW-NEXT: vmovdqa %ymm6, (%rdx)
4526 ; AVX512DQ-BW-NEXT: vmovdqa %ymm7, (%rcx)
4527 ; AVX512DQ-BW-NEXT: vmovdqa %ymm8, (%r8)
4528 ; AVX512DQ-BW-NEXT: vmovdqa %ymm9, (%r9)
4529 ; AVX512DQ-BW-NEXT: vmovdqa %ymm0, (%rax)
4530 ; AVX512DQ-BW-NEXT: vzeroupper
4531 ; AVX512DQ-BW-NEXT: retq
4532 ;
4533 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride6_vf32:
4534 ; AVX512DQ-BW-FCP: # %bb.0:
4535 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
4536 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
4537 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm0
4538 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 64(%rdi), %ymm3
4539 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
4540 ; AVX512DQ-BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
4541 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm8
4542 ; AVX512DQ-BW-FCP-NEXT: movw $-28124, %r10w # imm = 0x9224
4543 ; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k2
4544 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm1, %ymm8, %ymm6 {%k2}
4545 ; AVX512DQ-BW-FCP-NEXT: movw $18724, %r10w # imm = 0x4924
4546 ; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
4547 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm0, %ymm4, %ymm7 {%k1}
4548 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
4549 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm7, %xmm9
4550 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u]
4551 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm5, %xmm5
4552 ; AVX512DQ-BW-FCP-NEXT: movl $4192256, %r10d # imm = 0x3FF800
4553 ; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k3
4554 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm5 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
4555 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm3
4556 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm2, %ymm3, %ymm10 {%k1}
4557 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
4558 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[4,10]
4559 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero
4560 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm12, %xmm13, %xmm12
4561 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
4562 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm12 = ymm5[0,1,2],ymm12[3,4,5,6,7],ymm5[8,9,10],ymm12[11,12,13,14,15]
4563 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7]
4564 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
4565 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u]
4566 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm7, %xmm9, %xmm7
4567 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
4568 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[5,11]
4569 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero
4570 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
4571 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
4572 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
4573 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
4574 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm8, %ymm1, %ymm9 {%k2}
4575 ; AVX512DQ-BW-FCP-NEXT: movw $9362, %di # imm = 0x2492
4576 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k3
4577 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm4, %ymm0, %ymm10 {%k3}
4578 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm10, %xmm11
4579 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
4580 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
4581 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm7, %xmm12, %xmm7
4582 ; AVX512DQ-BW-FCP-NEXT: movl $2095104, %edi # imm = 0x1FF800
4583 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k4
4584 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm7 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
4585 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm3, %ymm2, %ymm12 {%k1}
4586 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
4587 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm12, %xmm14
4588 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12]
4589 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm13, %xmm15, %xmm13
4590 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
4591 ; AVX512DQ-BW-FCP-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
4592 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k2
4593 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm13, %ymm7 {%k2}
4594 ; AVX512DQ-BW-FCP-NEXT: movw $9289, %di # imm = 0x2449
4595 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k5
4596 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm8, %ymm1 {%k5}
4597 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
4598 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
4599 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm8, %xmm10, %xmm8
4600 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm8 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
4601 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
4602 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13]
4603 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm9, %xmm10, %xmm9
4604 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
4605 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm9, %ymm8 {%k2}
4606 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
4607 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm4, %ymm0 {%k1}
4608 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm4
4609 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
4610 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
4611 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm10, %xmm11, %xmm10
4612 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm9[5,6,7]
4613 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
4614 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm2, %ymm3 {%k3}
4615 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm3, %xmm2
4616 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[2,8,14]
4617 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero
4618 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm10, %xmm11, %xmm10
4619 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
4620 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm10, %ymm9 {%k2}
4621 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
4622 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
4623 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
4624 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm4, %xmm0, %xmm0
4625 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
4626 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4627 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[3,9,15]
4628 ; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero
4629 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
4630 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4631 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2}
4632 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm5, (%rsi)
4633 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm6, (%rdx)
4634 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm7, (%rcx)
4635 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm8, (%r8)
4636 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm9, (%r9)
4637 ; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, (%rax)
4638 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
4639 ; AVX512DQ-BW-FCP-NEXT: retq
4640 %wide.vec = load <192 x i8>, ptr %in.vec, align 64
4641 %strided.vec0 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186>
4642 %strided.vec1 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187>
4643 %strided.vec2 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92, i32 98, i32 104, i32 110, i32 116, i32 122, i32 128, i32 134, i32 140, i32 146, i32 152, i32 158, i32 164, i32 170, i32 176, i32 182, i32 188>
4644 %strided.vec3 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93, i32 99, i32 105, i32 111, i32 117, i32 123, i32 129, i32 135, i32 141, i32 147, i32 153, i32 159, i32 165, i32 171, i32 177, i32 183, i32 189>
4645 %strided.vec4 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94, i32 100, i32 106, i32 112, i32 118, i32 124, i32 130, i32 136, i32 142, i32 148, i32 154, i32 160, i32 166, i32 172, i32 178, i32 184, i32 190>
4646 %strided.vec5 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95, i32 101, i32 107, i32 113, i32 119, i32 125, i32 131, i32 137, i32 143, i32 149, i32 155, i32 161, i32 167, i32 173, i32 179, i32 185, i32 191>
4647 store <32 x i8> %strided.vec0, ptr %out.vec0, align 64
4648 store <32 x i8> %strided.vec1, ptr %out.vec1, align 64
4649 store <32 x i8> %strided.vec2, ptr %out.vec2, align 64
4650 store <32 x i8> %strided.vec3, ptr %out.vec3, align 64
4651 store <32 x i8> %strided.vec4, ptr %out.vec4, align 64
4652 store <32 x i8> %strided.vec5, ptr %out.vec5, align 64
4653 ret void
4654 }
4656 define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
4657 ; SSE-LABEL: load_i8_stride6_vf64:
4658 ; SSE: # %bb.0:
4659 ; SSE-NEXT: subq $792, %rsp # imm = 0x318
4660 ; SSE-NEXT: movdqa 64(%rdi), %xmm4
4661 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4662 ; SSE-NEXT: movdqa 80(%rdi), %xmm5
4663 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4664 ; SSE-NEXT: movdqa (%rdi), %xmm7
4665 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4666 ; SSE-NEXT: movdqa 16(%rdi), %xmm6
4667 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4668 ; SSE-NEXT: movdqa 32(%rdi), %xmm2
4669 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4670 ; SSE-NEXT: movdqa 48(%rdi), %xmm0
4671 ; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,65535,0,65535,65535,0,65535,65535]
4672 ; SSE-NEXT: movdqa %xmm13, %xmm1
4673 ; SSE-NEXT: pandn %xmm2, %xmm1
4674 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,0,65535,65535,0]
4675 ; SSE-NEXT: movdqa %xmm3, %xmm2
4676 ; SSE-NEXT: pandn %xmm0, %xmm2
4677 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4678 ; SSE-NEXT: movdqa %xmm13, %xmm2
4679 ; SSE-NEXT: pandn %xmm0, %xmm2
4680 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4681 ; SSE-NEXT: pand %xmm13, %xmm0
4682 ; SSE-NEXT: por %xmm1, %xmm0
4683 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4684 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
4685 ; SSE-NEXT: pand %xmm1, %xmm0
4686 ; SSE-NEXT: movdqa %xmm1, %xmm10
4687 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7]
4688 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
4689 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
4690 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
4691 ; SSE-NEXT: packuswb %xmm1, %xmm0
4692 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,0,0,65535,65535]
4693 ; SSE-NEXT: movdqa %xmm3, %xmm1
4694 ; SSE-NEXT: pandn %xmm6, %xmm1
4695 ; SSE-NEXT: movdqa %xmm7, %xmm2
4696 ; SSE-NEXT: pand %xmm3, %xmm2
4697 ; SSE-NEXT: por %xmm1, %xmm2
4698 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4699 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,1,3]
4700 ; SSE-NEXT: pand %xmm10, %xmm1
4701 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
4702 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
4703 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
4704 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
4705 ; SSE-NEXT: packuswb %xmm1, %xmm1
4706 ; SSE-NEXT: pand %xmm8, %xmm1
4707 ; SSE-NEXT: movdqa %xmm8, %xmm2
4708 ; SSE-NEXT: pandn %xmm0, %xmm2
4709 ; SSE-NEXT: por %xmm2, %xmm1
4710 ; SSE-NEXT: movdqa %xmm13, %xmm0
4711 ; SSE-NEXT: pandn %xmm5, %xmm0
4712 ; SSE-NEXT: pand %xmm13, %xmm4
4713 ; SSE-NEXT: por %xmm0, %xmm4
4714 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4715 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,0]
4716 ; SSE-NEXT: pand %xmm10, %xmm0
4717 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
4718 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
4719 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
4720 ; SSE-NEXT: packuswb %xmm0, %xmm0
4721 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
4722 ; SSE-NEXT: movdqa %xmm4, %xmm2
4723 ; SSE-NEXT: pandn %xmm0, %xmm2
4724 ; SSE-NEXT: pand %xmm4, %xmm1
4725 ; SSE-NEXT: por %xmm1, %xmm2
4726 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4727 ; SSE-NEXT: movdqa 320(%rdi), %xmm1
4728 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4729 ; SSE-NEXT: movdqa %xmm13, %xmm0
4730 ; SSE-NEXT: pandn %xmm1, %xmm0
4731 ; SSE-NEXT: movdqa 336(%rdi), %xmm12
4732 ; SSE-NEXT: movdqa %xmm3, %xmm1
4733 ; SSE-NEXT: pandn %xmm12, %xmm1
4734 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4735 ; SSE-NEXT: movdqa %xmm13, %xmm1
4736 ; SSE-NEXT: pandn %xmm12, %xmm1
4737 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4738 ; SSE-NEXT: pand %xmm13, %xmm12
4739 ; SSE-NEXT: por %xmm0, %xmm12
4740 ; SSE-NEXT: movdqa %xmm12, %xmm0
4741 ; SSE-NEXT: pand %xmm10, %xmm0
4742 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7]
4743 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
4744 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
4745 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
4746 ; SSE-NEXT: packuswb %xmm1, %xmm0
4747 ; SSE-NEXT: movdqa %xmm8, %xmm1
4748 ; SSE-NEXT: pandn %xmm0, %xmm1
4749 ; SSE-NEXT: movdqa 304(%rdi), %xmm2
4750 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4751 ; SSE-NEXT: movdqa %xmm3, %xmm7
4752 ; SSE-NEXT: movdqa %xmm3, %xmm0
4753 ; SSE-NEXT: pandn %xmm2, %xmm0
4754 ; SSE-NEXT: movdqa 288(%rdi), %xmm6
4755 ; SSE-NEXT: movdqa %xmm6, %xmm2
4756 ; SSE-NEXT: pand %xmm3, %xmm2
4757 ; SSE-NEXT: por %xmm0, %xmm2
4758 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4759 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,1,3]
4760 ; SSE-NEXT: pand %xmm10, %xmm0
4761 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
4762 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
4763 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
4764 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
4765 ; SSE-NEXT: packuswb %xmm0, %xmm0
4766 ; SSE-NEXT: pand %xmm8, %xmm0
4767 ; SSE-NEXT: por %xmm1, %xmm0
4768 ; SSE-NEXT: movdqa 368(%rdi), %xmm1
4769 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4770 ; SSE-NEXT: movdqa %xmm13, %xmm2
4771 ; SSE-NEXT: pandn %xmm1, %xmm2
4772 ; SSE-NEXT: movdqa 352(%rdi), %xmm3
4773 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4774 ; SSE-NEXT: pand %xmm13, %xmm3
4775 ; SSE-NEXT: por %xmm2, %xmm3
4776 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4777 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,0]
4778 ; SSE-NEXT: pand %xmm10, %xmm2
4779 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
4780 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,0]
4781 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
4782 ; SSE-NEXT: packuswb %xmm2, %xmm2
4783 ; SSE-NEXT: movdqa %xmm4, %xmm3
4784 ; SSE-NEXT: pandn %xmm2, %xmm3
4785 ; SSE-NEXT: pand %xmm4, %xmm0
4786 ; SSE-NEXT: movdqa %xmm4, %xmm9
4787 ; SSE-NEXT: por %xmm0, %xmm3
4788 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4789 ; SSE-NEXT: movdqa 224(%rdi), %xmm1
4790 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4791 ; SSE-NEXT: movdqa %xmm13, %xmm0
4792 ; SSE-NEXT: pandn %xmm1, %xmm0
4793 ; SSE-NEXT: movdqa 240(%rdi), %xmm11
4794 ; SSE-NEXT: movdqa %xmm7, %xmm2
4795 ; SSE-NEXT: pandn %xmm11, %xmm2
4796 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4797 ; SSE-NEXT: movdqa %xmm13, %xmm2
4798 ; SSE-NEXT: pandn %xmm11, %xmm2
4799 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4800 ; SSE-NEXT: pand %xmm13, %xmm11
4801 ; SSE-NEXT: por %xmm0, %xmm11
4802 ; SSE-NEXT: movdqa %xmm11, %xmm0
4803 ; SSE-NEXT: pand %xmm10, %xmm0
4804 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,3,2,3,4,5,6,7]
4805 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
4806 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
4807 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
4808 ; SSE-NEXT: packuswb %xmm2, %xmm0
4809 ; SSE-NEXT: movdqa %xmm8, %xmm2
4810 ; SSE-NEXT: pandn %xmm0, %xmm2
4811 ; SSE-NEXT: movdqa 208(%rdi), %xmm1
4812 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4813 ; SSE-NEXT: movdqa %xmm7, %xmm0
4814 ; SSE-NEXT: pandn %xmm1, %xmm0
4815 ; SSE-NEXT: movdqa 192(%rdi), %xmm3
4816 ; SSE-NEXT: movdqa %xmm3, %xmm1
4817 ; SSE-NEXT: pand %xmm7, %xmm1
4818 ; SSE-NEXT: por %xmm0, %xmm1
4819 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4820 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,1,3]
4821 ; SSE-NEXT: movdqa %xmm10, %xmm1
4822 ; SSE-NEXT: pand %xmm10, %xmm0
4823 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
4824 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
4825 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
4826 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
4827 ; SSE-NEXT: packuswb %xmm0, %xmm0
4828 ; SSE-NEXT: pand %xmm8, %xmm0
4829 ; SSE-NEXT: movdqa %xmm8, %xmm10
4830 ; SSE-NEXT: por %xmm2, %xmm0
4831 ; SSE-NEXT: movdqa 272(%rdi), %xmm14
4832 ; SSE-NEXT: movdqa %xmm13, %xmm2
4833 ; SSE-NEXT: pandn %xmm14, %xmm2
4834 ; SSE-NEXT: movdqa 256(%rdi), %xmm15
4835 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4836 ; SSE-NEXT: pand %xmm13, %xmm15
4837 ; SSE-NEXT: por %xmm2, %xmm15
4838 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[3,1,2,0]
4839 ; SSE-NEXT: pand %xmm1, %xmm2
4840 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
4841 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,0]
4842 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
4843 ; SSE-NEXT: packuswb %xmm2, %xmm2
4844 ; SSE-NEXT: pandn %xmm2, %xmm4
4845 ; SSE-NEXT: pand %xmm9, %xmm0
4846 ; SSE-NEXT: por %xmm0, %xmm4
4847 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4848 ; SSE-NEXT: movdqa 128(%rdi), %xmm2
4849 ; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
4850 ; SSE-NEXT: movdqa %xmm13, %xmm0
4851 ; SSE-NEXT: pandn %xmm2, %xmm0
4852 ; SSE-NEXT: movdqa 144(%rdi), %xmm9
4853 ; SSE-NEXT: movdqa %xmm7, %xmm4
4854 ; SSE-NEXT: pandn %xmm9, %xmm4
4855 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4856 ; SSE-NEXT: movdqa %xmm13, %xmm4
4857 ; SSE-NEXT: pandn %xmm9, %xmm4
4858 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4859 ; SSE-NEXT: pand %xmm13, %xmm9
4860 ; SSE-NEXT: por %xmm0, %xmm9
4861 ; SSE-NEXT: movdqa %xmm9, %xmm0
4862 ; SSE-NEXT: pand %xmm1, %xmm0
4863 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[0,3,2,3,4,5,6,7]
4864 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
4865 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
4866 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
4867 ; SSE-NEXT: packuswb %xmm5, %xmm0
4868 ; SSE-NEXT: pandn %xmm0, %xmm10
4869 ; SSE-NEXT: movdqa %xmm13, %xmm0
4870 ; SSE-NEXT: movdqa %xmm13, %xmm2
4871 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
4872 ; SSE-NEXT: pandn %xmm13, %xmm2
4873 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4874 ; SSE-NEXT: movdqa %xmm13, %xmm1
4875 ; SSE-NEXT: movdqa %xmm0, %xmm2
4876 ; SSE-NEXT: pandn %xmm6, %xmm2
4877 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4878 ; SSE-NEXT: movdqa %xmm6, %xmm5
4879 ; SSE-NEXT: movdqa %xmm0, %xmm2
4880 ; SSE-NEXT: pandn %xmm3, %xmm2
4881 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4882 ; SSE-NEXT: movdqa %xmm3, %xmm4
4883 ; SSE-NEXT: movdqa 112(%rdi), %xmm6
4884 ; SSE-NEXT: movdqa %xmm7, %xmm2
4885 ; SSE-NEXT: movdqa %xmm7, %xmm8
4886 ; SSE-NEXT: pandn %xmm6, %xmm8
4887 ; SSE-NEXT: movdqa 160(%rdi), %xmm7
4888 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4889 ; SSE-NEXT: pand %xmm0, %xmm7
4890 ; SSE-NEXT: movdqa %xmm0, %xmm3
4891 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
4892 ; SSE-NEXT: pandn %xmm13, %xmm3
4893 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4894 ; SSE-NEXT: pand %xmm0, %xmm1
4895 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4896 ; SSE-NEXT: movdqa %xmm2, %xmm3
4897 ; SSE-NEXT: movdqa %xmm2, %xmm1
4898 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4899 ; SSE-NEXT: pandn %xmm2, %xmm3
4900 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4901 ; SSE-NEXT: pand %xmm0, %xmm2
4902 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4903 ; SSE-NEXT: movdqa %xmm0, %xmm2
4904 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
4905 ; SSE-NEXT: pandn %xmm3, %xmm2
4906 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4907 ; SSE-NEXT: pand %xmm0, %xmm5
4908 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4909 ; SSE-NEXT: movdqa %xmm1, %xmm5
4910 ; SSE-NEXT: movdqa %xmm1, %xmm2
4911 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4912 ; SSE-NEXT: pandn %xmm1, %xmm2
4913 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4914 ; SSE-NEXT: pand %xmm0, %xmm1
4915 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4916 ; SSE-NEXT: movdqa %xmm0, %xmm2
4917 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4918 ; SSE-NEXT: pandn %xmm1, %xmm2
4919 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4920 ; SSE-NEXT: pand %xmm0, %xmm4
4921 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4922 ; SSE-NEXT: pandn %xmm14, %xmm5
4923 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4924 ; SSE-NEXT: pand %xmm0, %xmm14
4925 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4926 ; SSE-NEXT: movdqa %xmm0, %xmm2
4927 ; SSE-NEXT: pandn %xmm6, %xmm2
4928 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4929 ; SSE-NEXT: movdqa 96(%rdi), %xmm4
4930 ; SSE-NEXT: movdqa %xmm4, %xmm2
4931 ; SSE-NEXT: pand %xmm0, %xmm2
4932 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4933 ; SSE-NEXT: movdqa 176(%rdi), %xmm14
4934 ; SSE-NEXT: movdqa %xmm14, %xmm2
4935 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4936 ; SSE-NEXT: pand %xmm0, %xmm2
4937 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4938 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4939 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4940 ; SSE-NEXT: pand %xmm0, %xmm2
4941 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4942 ; SSE-NEXT: pand %xmm0, %xmm13
4943 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4944 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4945 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4946 ; SSE-NEXT: pand %xmm0, %xmm2
4947 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4948 ; SSE-NEXT: pand %xmm0, %xmm3
4949 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4950 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
4951 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4952 ; SSE-NEXT: pand %xmm0, %xmm2
4953 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4954 ; SSE-NEXT: pand %xmm0, %xmm1
4955 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4956 ; SSE-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
4957 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4958 ; SSE-NEXT: pand %xmm0, %xmm1
4959 ; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
4960 ; SSE-NEXT: movdqa %xmm0, %xmm1
4961 ; SSE-NEXT: pand %xmm0, %xmm6
4962 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4963 ; SSE-NEXT: movdqa %xmm0, %xmm13
4964 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4965 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4966 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4967 ; SSE-NEXT: pandn %xmm4, %xmm1
4968 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4969 ; SSE-NEXT: movdqa %xmm4, %xmm3
4970 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
4971 ; SSE-NEXT: por %xmm8, %xmm3
4972 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,2,1,3]
4973 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
4974 ; SSE-NEXT: pand %xmm1, %xmm5
4975 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7]
4976 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3]
4977 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,3,2,1,4,5,6,7]
4978 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7]
4979 ; SSE-NEXT: packuswb %xmm5, %xmm5
4980 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,0,0,65535,65535]
4981 ; SSE-NEXT: pand %xmm8, %xmm5
4982 ; SSE-NEXT: por %xmm10, %xmm5
4983 ; SSE-NEXT: pandn %xmm14, %xmm0
4984 ; SSE-NEXT: por %xmm0, %xmm7
4985 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,1,2,0]
4986 ; SSE-NEXT: pand %xmm1, %xmm0
4987 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
4988 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
4989 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
4990 ; SSE-NEXT: packuswb %xmm0, %xmm0
4991 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
4992 ; SSE-NEXT: movdqa %xmm10, %xmm1
4993 ; SSE-NEXT: pandn %xmm0, %xmm1
4994 ; SSE-NEXT: pand %xmm10, %xmm5
4995 ; SSE-NEXT: por %xmm5, %xmm1
4996 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4997 ; SSE-NEXT: pxor %xmm5, %xmm5
4998 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
4999 ; SSE-NEXT: movdqa %xmm1, %xmm0
5000 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
5001 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
5002 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,2,3,3]
5003 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
5004 ; SSE-NEXT: psrld $16, %xmm0
5005 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm1[0,1,0,3]
5006 ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,7,6,7]
5007 ; SSE-NEXT: punpckhdq {{.*#+}} xmm14 = xmm14[2],xmm0[2],xmm14[3],xmm0[3]
5008 ; SSE-NEXT: packuswb %xmm14, %xmm4
5009 ; SSE-NEXT: movdqa %xmm8, %xmm1
5010 ; SSE-NEXT: pandn %xmm4, %xmm1
5011 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5012 ; SSE-NEXT: movdqa %xmm2, %xmm4
5013 ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
5014 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,0,3]
5015 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
5016 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
5017 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,0,0,65535,65535]
5018 ; SSE-NEXT: movdqa %xmm0, %xmm14
5019 ; SSE-NEXT: pandn %xmm4, %xmm14
5020 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
5021 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[3,1,2,3,4,5,6,7]
5022 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
5023 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,3,2,0,4,5,6,7]
5024 ; SSE-NEXT: pand %xmm0, %xmm4
5025 ; SSE-NEXT: por %xmm14, %xmm4
5026 ; SSE-NEXT: packuswb %xmm4, %xmm4
5027 ; SSE-NEXT: pand %xmm8, %xmm4
5028 ; SSE-NEXT: por %xmm1, %xmm4
5029 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5030 ; SSE-NEXT: movdqa %xmm6, %xmm1
5031 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
5032 ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm1[0,1,2,3,5,5,5,5]
5033 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,0,65535]
5034 ; SSE-NEXT: movdqa %xmm2, %xmm1
5035 ; SSE-NEXT: pandn %xmm14, %xmm1
5036 ; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
5037 ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm6[3,1,2,3,4,5,6,7]
5038 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[0,1,0,3]
5039 ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,7,6,4]
5040 ; SSE-NEXT: pand %xmm2, %xmm14
5041 ; SSE-NEXT: por %xmm1, %xmm14
5042 ; SSE-NEXT: packuswb %xmm14, %xmm1
5043 ; SSE-NEXT: movdqa %xmm10, %xmm14
5044 ; SSE-NEXT: pandn %xmm1, %xmm14
5045 ; SSE-NEXT: pand %xmm10, %xmm4
5046 ; SSE-NEXT: por %xmm4, %xmm14
5047 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5048 ; SSE-NEXT: movdqa %xmm12, %xmm1
5049 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
5050 ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
5051 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm12[2,2,3,3]
5052 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
5053 ; SSE-NEXT: psrld $16, %xmm1
5054 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,1,0,3]
5055 ; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,7,6,7]
5056 ; SSE-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm1[2],xmm12[3],xmm1[3]
5057 ; SSE-NEXT: packuswb %xmm12, %xmm4
5058 ; SSE-NEXT: movdqa %xmm8, %xmm14
5059 ; SSE-NEXT: movdqa %xmm8, %xmm1
5060 ; SSE-NEXT: pandn %xmm4, %xmm1
5061 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5062 ; SSE-NEXT: movdqa %xmm6, %xmm4
5063 ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
5064 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,0,3]
5065 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
5066 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
5067 ; SSE-NEXT: movdqa %xmm0, %xmm12
5068 ; SSE-NEXT: pandn %xmm4, %xmm12
5069 ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
5070 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm6[3,1,2,3,4,5,6,7]
5071 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
5072 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,3,2,0,4,5,6,7]
5073 ; SSE-NEXT: pand %xmm0, %xmm4
5074 ; SSE-NEXT: por %xmm12, %xmm4
5075 ; SSE-NEXT: packuswb %xmm4, %xmm4
5076 ; SSE-NEXT: pand %xmm8, %xmm4
5077 ; SSE-NEXT: por %xmm1, %xmm4
5078 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5079 ; SSE-NEXT: movdqa %xmm6, %xmm1
5080 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
5081 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
5082 ; SSE-NEXT: movdqa %xmm2, %xmm12
5083 ; SSE-NEXT: pandn %xmm1, %xmm12
5084 ; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
5085 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[3,1,2,3,4,5,6,7]
5086 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
5087 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4]
5088 ; SSE-NEXT: pand %xmm2, %xmm1
5089 ; SSE-NEXT: por %xmm12, %xmm1
5090 ; SSE-NEXT: packuswb %xmm1, %xmm1
5091 ; SSE-NEXT: movdqa %xmm10, %xmm12
5092 ; SSE-NEXT: pandn %xmm1, %xmm12
5093 ; SSE-NEXT: pand %xmm10, %xmm4
5094 ; SSE-NEXT: por %xmm4, %xmm12
5095 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5096 ; SSE-NEXT: movdqa %xmm11, %xmm1
5097 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
5098 ; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3],xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
5099 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[2,2,3,3]
5100 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
5101 ; SSE-NEXT: psrld $16, %xmm1
5102 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm11[0,1,0,3]
5103 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
5104 ; SSE-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
5105 ; SSE-NEXT: packuswb %xmm8, %xmm4
5106 ; SSE-NEXT: movdqa %xmm14, %xmm1
5107 ; SSE-NEXT: pandn %xmm4, %xmm1
5108 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5109 ; SSE-NEXT: movdqa %xmm6, %xmm4
5110 ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
5111 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,0,3]
5112 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
5113 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
5114 ; SSE-NEXT: movdqa %xmm0, %xmm8
5115 ; SSE-NEXT: pandn %xmm4, %xmm8
5116 ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
5117 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm6[3,1,2,3,4,5,6,7]
5118 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
5119 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,3,2,0,4,5,6,7]
5120 ; SSE-NEXT: pand %xmm0, %xmm4
5121 ; SSE-NEXT: por %xmm8, %xmm4
5122 ; SSE-NEXT: packuswb %xmm4, %xmm4
5123 ; SSE-NEXT: pand %xmm14, %xmm4
5124 ; SSE-NEXT: por %xmm1, %xmm4
5125 ; SSE-NEXT: movdqa %xmm15, %xmm1
5126 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
5127 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
5128 ; SSE-NEXT: movdqa %xmm2, %xmm8
5129 ; SSE-NEXT: pandn %xmm1, %xmm8
5130 ; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm5[8],xmm15[9],xmm5[9],xmm15[10],xmm5[10],xmm15[11],xmm5[11],xmm15[12],xmm5[12],xmm15[13],xmm5[13],xmm15[14],xmm5[14],xmm15[15],xmm5[15]
5131 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm15[3,1,2,3,4,5,6,7]
5132 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
5133 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4]
5134 ; SSE-NEXT: pand %xmm2, %xmm1
5135 ; SSE-NEXT: por %xmm8, %xmm1
5136 ; SSE-NEXT: packuswb %xmm1, %xmm1
5137 ; SSE-NEXT: movdqa %xmm10, %xmm8
5138 ; SSE-NEXT: pandn %xmm1, %xmm8
5139 ; SSE-NEXT: pand %xmm10, %xmm4
5140 ; SSE-NEXT: por %xmm4, %xmm8
5141 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5142 ; SSE-NEXT: movdqa %xmm9, %xmm1
5143 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
5144 ; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3],xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
5145 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm9[2,2,3,3]
5146 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
5147 ; SSE-NEXT: psrld $16, %xmm1
5148 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,1,0,3]
5149 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7]
5150 ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
5151 ; SSE-NEXT: packuswb %xmm6, %xmm4
5152 ; SSE-NEXT: movdqa %xmm3, %xmm1
5153 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
5154 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
5155 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
5156 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,7]
5157 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
5158 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
5159 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
5160 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,3,2,0,4,5,6,7]
5161 ; SSE-NEXT: pand %xmm0, %xmm3
5162 ; SSE-NEXT: pandn %xmm1, %xmm0
5163 ; SSE-NEXT: por %xmm3, %xmm0
5164 ; SSE-NEXT: packuswb %xmm0, %xmm0
5165 ; SSE-NEXT: movdqa %xmm14, %xmm1
5166 ; SSE-NEXT: pand %xmm14, %xmm0
5167 ; SSE-NEXT: pandn %xmm4, %xmm1
5168 ; SSE-NEXT: por %xmm1, %xmm0
5169 ; SSE-NEXT: movdqa %xmm7, %xmm1
5170 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
5171 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
5172 ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm5[8],xmm7[9],xmm5[9],xmm7[10],xmm5[10],xmm7[11],xmm5[11],xmm7[12],xmm5[12],xmm7[13],xmm5[13],xmm7[14],xmm5[14],xmm7[15],xmm5[15]
5173 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[3,1,2,3,4,5,6,7]
5174 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
5175 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,4]
5176 ; SSE-NEXT: pand %xmm2, %xmm3
5177 ; SSE-NEXT: pandn %xmm1, %xmm2
5178 ; SSE-NEXT: por %xmm3, %xmm2
5179 ; SSE-NEXT: packuswb %xmm2, %xmm1
5180 ; SSE-NEXT: movdqa %xmm10, %xmm2
5181 ; SSE-NEXT: pandn %xmm1, %xmm2
5182 ; SSE-NEXT: pand %xmm10, %xmm0
5183 ; SSE-NEXT: movdqa %xmm10, %xmm11
5184 ; SSE-NEXT: por %xmm0, %xmm2
5185 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5186 ; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,0,65535,65535,0,65535,65535,0]
5187 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
5188 ; SSE-NEXT: pand %xmm14, %xmm4
5189 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5190 ; SSE-NEXT: movdqa %xmm4, %xmm0
5191 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255]
5192 ; SSE-NEXT: pand %xmm10, %xmm0
5193 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7]
5194 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
5195 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7]
5196 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
5197 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5198 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
5199 ; SSE-NEXT: packuswb %xmm1, %xmm0
5200 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5201 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
5202 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[2,1,2,3,4,5,6,7]
5203 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
5204 ; SSE-NEXT: pand %xmm10, %xmm1
5205 ; SSE-NEXT: movdqa %xmm10, %xmm12
5206 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
5207 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
5208 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,5,5,5,5]
5209 ; SSE-NEXT: packuswb %xmm2, %xmm2
5210 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
5211 ; SSE-NEXT: movdqa %xmm5, %xmm3
5212 ; SSE-NEXT: pandn %xmm2, %xmm3
5213 ; SSE-NEXT: pand %xmm5, %xmm0
5214 ; SSE-NEXT: por %xmm0, %xmm3
5215 ; SSE-NEXT: movdqa %xmm13, %xmm0
5216 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5217 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5218 ; SSE-NEXT: por %xmm0, %xmm1
5219 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5220 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,3,2,3,4,5,6,7]
5221 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
5222 ; SSE-NEXT: pand %xmm10, %xmm0
5223 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
5224 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
5225 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
5226 ; SSE-NEXT: packuswb %xmm0, %xmm0
5227 ; SSE-NEXT: movdqa %xmm11, %xmm2
5228 ; SSE-NEXT: pandn %xmm0, %xmm2
5229 ; SSE-NEXT: pand %xmm11, %xmm3
5230 ; SSE-NEXT: movdqa %xmm11, %xmm8
5231 ; SSE-NEXT: por %xmm3, %xmm2
5232 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5233 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
5234 ; SSE-NEXT: pand %xmm14, %xmm13
5235 ; SSE-NEXT: movdqa %xmm14, %xmm7
5236 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
5237 ; SSE-NEXT: movdqa %xmm13, %xmm0
5238 ; SSE-NEXT: pand %xmm10, %xmm0
5239 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,7,6,7]
5240 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
5241 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,3,4,5,6,7]
5242 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
5243 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5244 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
5245 ; SSE-NEXT: packuswb %xmm2, %xmm0
5246 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5247 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
5248 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm14[2,1,2,3,4,5,6,7]
5249 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
5250 ; SSE-NEXT: pand %xmm10, %xmm2
5251 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
5252 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
5253 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
5254 ; SSE-NEXT: packuswb %xmm2, %xmm2
5255 ; SSE-NEXT: movdqa %xmm5, %xmm3
5256 ; SSE-NEXT: pandn %xmm2, %xmm3
5257 ; SSE-NEXT: pand %xmm5, %xmm0
5258 ; SSE-NEXT: por %xmm0, %xmm3
5259 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5260 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5261 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
5262 ; SSE-NEXT: por %xmm0, %xmm11
5263 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[0,3,2,3,4,5,6,7]
5264 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
5265 ; SSE-NEXT: pand %xmm10, %xmm0
5266 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
5267 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
5268 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
5269 ; SSE-NEXT: packuswb %xmm0, %xmm0
5270 ; SSE-NEXT: movdqa %xmm8, %xmm2
5271 ; SSE-NEXT: pandn %xmm0, %xmm2
5272 ; SSE-NEXT: pand %xmm8, %xmm3
5273 ; SSE-NEXT: movdqa %xmm8, %xmm9
5274 ; SSE-NEXT: por %xmm3, %xmm2
5275 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5276 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5277 ; SSE-NEXT: pand %xmm7, %xmm10
5278 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
5279 ; SSE-NEXT: movdqa %xmm10, %xmm0
5280 ; SSE-NEXT: pand %xmm12, %xmm0
5281 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,7,6,7]
5282 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
5283 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,3,4,5,6,7]
5284 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
5285 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5286 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
5287 ; SSE-NEXT: packuswb %xmm2, %xmm0
5288 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5289 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5290 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5291 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,1,2,3,4,5,6,7]
5292 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
5293 ; SSE-NEXT: pand %xmm12, %xmm2
5294 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
5295 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
5296 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
5297 ; SSE-NEXT: packuswb %xmm2, %xmm2
5298 ; SSE-NEXT: movdqa %xmm5, %xmm3
5299 ; SSE-NEXT: pandn %xmm2, %xmm3
5300 ; SSE-NEXT: pand %xmm5, %xmm0
5301 ; SSE-NEXT: por %xmm0, %xmm3
5302 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5303 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5304 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5305 ; SSE-NEXT: por %xmm0, %xmm8
5306 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,3,2,3,4,5,6,7]
5307 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
5308 ; SSE-NEXT: pand %xmm12, %xmm0
5309 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
5310 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
5311 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
5312 ; SSE-NEXT: packuswb %xmm0, %xmm0
5313 ; SSE-NEXT: movdqa %xmm9, %xmm2
5314 ; SSE-NEXT: pandn %xmm0, %xmm2
5315 ; SSE-NEXT: pand %xmm9, %xmm3
5316 ; SSE-NEXT: movdqa %xmm9, %xmm1
5317 ; SSE-NEXT: por %xmm3, %xmm2
5318 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5319 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5320 ; SSE-NEXT: pand %xmm7, %xmm0
5321 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5322 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5323 ; SSE-NEXT: pand %xmm12, %xmm0
5324 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,7,6,7]
5325 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
5326 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,3,4,5,6,7]
5327 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
5328 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5329 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
5330 ; SSE-NEXT: packuswb %xmm2, %xmm0
5331 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5332 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
5333 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm15[2,1,2,3,4,5,6,7]
5334 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
5335 ; SSE-NEXT: pand %xmm12, %xmm2
5336 ; SSE-NEXT: movdqa %xmm12, %xmm9
5337 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
5338 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
5339 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
5340 ; SSE-NEXT: packuswb %xmm2, %xmm2
5341 ; SSE-NEXT: movdqa %xmm5, %xmm3
5342 ; SSE-NEXT: pandn %xmm2, %xmm3
5343 ; SSE-NEXT: pand %xmm5, %xmm0
5344 ; SSE-NEXT: por %xmm0, %xmm3
5345 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5346 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5347 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5348 ; SSE-NEXT: por %xmm0, %xmm12
5349 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm12[0,3,2,3,4,5,6,7]
5350 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
5351 ; SSE-NEXT: pand %xmm9, %xmm0
5352 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
5353 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
5354 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
5355 ; SSE-NEXT: packuswb %xmm0, %xmm0
5356 ; SSE-NEXT: movdqa %xmm1, %xmm9
5357 ; SSE-NEXT: movdqa %xmm1, %xmm2
5358 ; SSE-NEXT: pandn %xmm0, %xmm2
5359 ; SSE-NEXT: pand %xmm1, %xmm3
5360 ; SSE-NEXT: por %xmm3, %xmm2
5361 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5362 ; SSE-NEXT: movdqa %xmm4, %xmm0
5363 ; SSE-NEXT: pxor %xmm1, %xmm1
5364 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
5365 ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
5366 ; SSE-NEXT: pxor %xmm7, %xmm7
5367 ; SSE-NEXT: movdqa %xmm4, %xmm2
5368 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
5369 ; SSE-NEXT: movaps %xmm0, %xmm3
5370 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[0,2]
5371 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[0,0]
5372 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[2,3]
5373 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,7,5,6,7]
5374 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
5375 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
5376 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
5377 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
5378 ; SSE-NEXT: packuswb %xmm0, %xmm2
5379 ; SSE-NEXT: movdqa %xmm6, %xmm0
5380 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
5381 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
5382 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
5383 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,65535,65535,0,65535,65535,65535,65535]
5384 ; SSE-NEXT: movdqa %xmm3, %xmm4
5385 ; SSE-NEXT: pandn %xmm0, %xmm4
5386 ; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
5387 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,3,2,1]
5388 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
5389 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
5390 ; SSE-NEXT: pand %xmm3, %xmm0
5391 ; SSE-NEXT: por %xmm4, %xmm0
5392 ; SSE-NEXT: packuswb %xmm0, %xmm0
5393 ; SSE-NEXT: movdqa %xmm5, %xmm6
5394 ; SSE-NEXT: pandn %xmm0, %xmm6
5395 ; SSE-NEXT: pand %xmm5, %xmm2
5396 ; SSE-NEXT: por %xmm2, %xmm6
5397 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5398 ; SSE-NEXT: movdqa %xmm1, %xmm0
5399 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm7[8],xmm0[9],xmm7[9],xmm0[10],xmm7[10],xmm0[11],xmm7[11],xmm0[12],xmm7[12],xmm0[13],xmm7[13],xmm0[14],xmm7[14],xmm0[15],xmm7[15]
5400 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
5401 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
5402 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,0,65535,65535,0]
5403 ; SSE-NEXT: movdqa %xmm4, %xmm2
5404 ; SSE-NEXT: pandn %xmm0, %xmm2
5405 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
5406 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,0,3]
5407 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
5408 ; SSE-NEXT: pand %xmm4, %xmm0
5409 ; SSE-NEXT: por %xmm2, %xmm0
5410 ; SSE-NEXT: packuswb %xmm0, %xmm0
5411 ; SSE-NEXT: movdqa %xmm9, %xmm2
5412 ; SSE-NEXT: pandn %xmm0, %xmm2
5413 ; SSE-NEXT: pand %xmm9, %xmm6
5414 ; SSE-NEXT: por %xmm6, %xmm2
5415 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5416 ; SSE-NEXT: movdqa %xmm13, %xmm0
5417 ; SSE-NEXT: pxor %xmm1, %xmm1
5418 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
5419 ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm1[8],xmm13[9],xmm1[9],xmm13[10],xmm1[10],xmm13[11],xmm1[11],xmm13[12],xmm1[12],xmm13[13],xmm1[13],xmm13[14],xmm1[14],xmm13[15],xmm1[15]
5420 ; SSE-NEXT: movdqa %xmm13, %xmm2
5421 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
5422 ; SSE-NEXT: movaps %xmm0, %xmm6
5423 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm2[0,2]
5424 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm13[0,0]
5425 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm13[2,3]
5426 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,7,5,6,7]
5427 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
5428 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
5429 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
5430 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
5431 ; SSE-NEXT: packuswb %xmm0, %xmm2
5432 ; SSE-NEXT: movdqa %xmm14, %xmm0
5433 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
5434 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
5435 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
5436 ; SSE-NEXT: movdqa %xmm3, %xmm6
5437 ; SSE-NEXT: pandn %xmm0, %xmm6
5438 ; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm1[8],xmm14[9],xmm1[9],xmm14[10],xmm1[10],xmm14[11],xmm1[11],xmm14[12],xmm1[12],xmm14[13],xmm1[13],xmm14[14],xmm1[14],xmm14[15],xmm1[15]
5439 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,3,2,1]
5440 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
5441 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
5442 ; SSE-NEXT: pand %xmm3, %xmm0
5443 ; SSE-NEXT: por %xmm6, %xmm0
5444 ; SSE-NEXT: packuswb %xmm0, %xmm0
5445 ; SSE-NEXT: movdqa %xmm5, %xmm6
5446 ; SSE-NEXT: pandn %xmm0, %xmm6
5447 ; SSE-NEXT: pand %xmm5, %xmm2
5448 ; SSE-NEXT: por %xmm2, %xmm6
5449 ; SSE-NEXT: movdqa %xmm11, %xmm0
5450 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
5451 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
5452 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
5453 ; SSE-NEXT: movdqa %xmm4, %xmm2
5454 ; SSE-NEXT: pandn %xmm0, %xmm2
5455 ; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3],xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
5456 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,2,0,3]
5457 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
5458 ; SSE-NEXT: pand %xmm4, %xmm0
5459 ; SSE-NEXT: por %xmm2, %xmm0
5460 ; SSE-NEXT: packuswb %xmm0, %xmm0
5461 ; SSE-NEXT: movdqa %xmm9, %xmm2
5462 ; SSE-NEXT: pandn %xmm0, %xmm2
5463 ; SSE-NEXT: pand %xmm9, %xmm6
5464 ; SSE-NEXT: por %xmm6, %xmm2
5465 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5466 ; SSE-NEXT: movdqa %xmm10, %xmm0
5467 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
5468 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm1[8],xmm10[9],xmm1[9],xmm10[10],xmm1[10],xmm10[11],xmm1[11],xmm10[12],xmm1[12],xmm10[13],xmm1[13],xmm10[14],xmm1[14],xmm10[15],xmm1[15]
5469 ; SSE-NEXT: movdqa %xmm10, %xmm2
5470 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
5471 ; SSE-NEXT: movaps %xmm0, %xmm6
5472 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm2[0,2]
5473 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm10[0,0]
5474 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm10[2,3]
5475 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,7,5,6,7]
5476 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
5477 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
5478 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
5479 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
5480 ; SSE-NEXT: packuswb %xmm0, %xmm2
5481 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5482 ; SSE-NEXT: movdqa %xmm7, %xmm0
5483 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
5484 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
5485 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
5486 ; SSE-NEXT: movdqa %xmm3, %xmm6
5487 ; SSE-NEXT: pandn %xmm0, %xmm6
5488 ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15]
5489 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,3,2,1]
5490 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
5491 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
5492 ; SSE-NEXT: pand %xmm3, %xmm0
5493 ; SSE-NEXT: por %xmm6, %xmm0
5494 ; SSE-NEXT: packuswb %xmm0, %xmm0
5495 ; SSE-NEXT: movdqa %xmm5, %xmm6
5496 ; SSE-NEXT: pandn %xmm0, %xmm6
5497 ; SSE-NEXT: pand %xmm5, %xmm2
5498 ; SSE-NEXT: por %xmm2, %xmm6
5499 ; SSE-NEXT: movdqa %xmm8, %xmm0
5500 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
5501 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
5502 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
5503 ; SSE-NEXT: movdqa %xmm4, %xmm2
5504 ; SSE-NEXT: pandn %xmm0, %xmm2
5505 ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
5506 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[0,2,0,3]
5507 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
5508 ; SSE-NEXT: pand %xmm4, %xmm0
5509 ; SSE-NEXT: por %xmm2, %xmm0
5510 ; SSE-NEXT: packuswb %xmm0, %xmm2
5511 ; SSE-NEXT: movdqa %xmm9, %xmm0
5512 ; SSE-NEXT: pandn %xmm2, %xmm0
5513 ; SSE-NEXT: pand %xmm9, %xmm6
5514 ; SSE-NEXT: por %xmm6, %xmm0
5515 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5516 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5517 ; SSE-NEXT: movdqa %xmm0, %xmm2
5518 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
5519 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
5520 ; SSE-NEXT: movdqa %xmm0, %xmm6
5521 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm2[3,0]
5522 ; SSE-NEXT: movaps %xmm2, %xmm7
5523 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm6[0,2]
5524 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[0,0]
5525 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[2,3]
5526 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,7,5,6,7]
5527 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
5528 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
5529 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
5530 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
5531 ; SSE-NEXT: packuswb %xmm2, %xmm6
5532 ; SSE-NEXT: movdqa %xmm15, %xmm2
5533 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
5534 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
5535 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
5536 ; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm1[8],xmm15[9],xmm1[9],xmm15[10],xmm1[10],xmm15[11],xmm1[11],xmm15[12],xmm1[12],xmm15[13],xmm1[13],xmm15[14],xmm1[14],xmm15[15],xmm1[15]
5537 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm15[0,3,2,1]
5538 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,3,3,4,5,6,7]
5539 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
5540 ; SSE-NEXT: pand %xmm3, %xmm7
5541 ; SSE-NEXT: pandn %xmm2, %xmm3
5542 ; SSE-NEXT: por %xmm7, %xmm3
5543 ; SSE-NEXT: pand %xmm5, %xmm6
5544 ; SSE-NEXT: packuswb %xmm3, %xmm3
5545 ; SSE-NEXT: pandn %xmm3, %xmm5
5546 ; SSE-NEXT: por %xmm6, %xmm5
5547 ; SSE-NEXT: movdqa %xmm12, %xmm2
5548 ; SSE-NEXT: pxor %xmm0, %xmm0
5549 ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
5550 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
5551 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
5552 ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
5553 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[0,2,0,3]
5554 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
5555 ; SSE-NEXT: pand %xmm4, %xmm3
5556 ; SSE-NEXT: pandn %xmm2, %xmm4
5557 ; SSE-NEXT: por %xmm3, %xmm4
5558 ; SSE-NEXT: pand %xmm9, %xmm5
5559 ; SSE-NEXT: packuswb %xmm4, %xmm2
5560 ; SSE-NEXT: pandn %xmm2, %xmm9
5561 ; SSE-NEXT: por %xmm5, %xmm9
5562 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5563 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5564 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5565 ; SSE-NEXT: movdqa %xmm0, %xmm1
5566 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255]
5567 ; SSE-NEXT: pand %xmm10, %xmm1
5568 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
5569 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
5570 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
5571 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
5572 ; SSE-NEXT: packuswb %xmm2, %xmm1
5573 ; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
5574 ; SSE-NEXT: movdqa %xmm15, %xmm2
5575 ; SSE-NEXT: pandn %xmm1, %xmm2
5576 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5577 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
5578 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[3,1,2,0]
5579 ; SSE-NEXT: pand %xmm10, %xmm1
5580 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
5581 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
5582 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm1[2,1,0,3,4,5,6,7]
5583 ; SSE-NEXT: packuswb %xmm6, %xmm6
5584 ; SSE-NEXT: pand %xmm15, %xmm6
5585 ; SSE-NEXT: por %xmm2, %xmm6
5586 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
5587 ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,0,65535,65535,0,65535,65535,0]
5588 ; SSE-NEXT: pand %xmm11, %xmm13
5589 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
5590 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,2,1,3]
5591 ; SSE-NEXT: pand %xmm10, %xmm1
5592 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7]
5593 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
5594 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
5595 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
5596 ; SSE-NEXT: packuswb %xmm1, %xmm2
5597 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,0,0]
5598 ; SSE-NEXT: movdqa %xmm1, %xmm3
5599 ; SSE-NEXT: pandn %xmm2, %xmm3
5600 ; SSE-NEXT: pand %xmm1, %xmm6
5601 ; SSE-NEXT: por %xmm6, %xmm3
5602 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5603 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
5604 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
5605 ; SSE-NEXT: movdqa %xmm14, %xmm2
5606 ; SSE-NEXT: pand %xmm10, %xmm2
5607 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[2,1,2,3]
5608 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[2,1,2,3,4,5,6,7]
5609 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
5610 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
5611 ; SSE-NEXT: packuswb %xmm6, %xmm2
5612 ; SSE-NEXT: movdqa %xmm15, %xmm6
5613 ; SSE-NEXT: pandn %xmm2, %xmm6
5614 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
5615 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
5616 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[3,1,2,0]
5617 ; SSE-NEXT: pand %xmm10, %xmm2
5618 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
5619 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
5620 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm2[2,1,0,3,4,5,6,7]
5621 ; SSE-NEXT: packuswb %xmm7, %xmm7
5622 ; SSE-NEXT: pand %xmm15, %xmm7
5623 ; SSE-NEXT: por %xmm6, %xmm7
5624 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
5625 ; SSE-NEXT: pand %xmm11, %xmm3
5626 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5627 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,1,3]
5628 ; SSE-NEXT: pand %xmm10, %xmm2
5629 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,1,4,5,6,7]
5630 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
5631 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,3,4,5,6,7]
5632 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
5633 ; SSE-NEXT: packuswb %xmm2, %xmm6
5634 ; SSE-NEXT: movdqa %xmm1, %xmm2
5635 ; SSE-NEXT: pandn %xmm6, %xmm2
5636 ; SSE-NEXT: pand %xmm1, %xmm7
5637 ; SSE-NEXT: por %xmm7, %xmm2
5638 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5639 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5640 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
5641 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5642 ; SSE-NEXT: pand %xmm10, %xmm6
5643 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
5644 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[2,1,2,3,4,5,6,7]
5645 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,7]
5646 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
5647 ; SSE-NEXT: packuswb %xmm7, %xmm6
5648 ; SSE-NEXT: movdqa %xmm15, %xmm7
5649 ; SSE-NEXT: pandn %xmm6, %xmm7
5650 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5651 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5652 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5653 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[3,1,2,0]
5654 ; SSE-NEXT: pand %xmm10, %xmm6
5655 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,6,7]
5656 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,0,3]
5657 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm6[2,1,0,3,4,5,6,7]
5658 ; SSE-NEXT: packuswb %xmm8, %xmm8
5659 ; SSE-NEXT: pand %xmm15, %xmm8
5660 ; SSE-NEXT: por %xmm7, %xmm8
5661 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5662 ; SSE-NEXT: pand %xmm11, %xmm2
5663 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5664 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5665 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,2,1,3]
5666 ; SSE-NEXT: pand %xmm10, %xmm6
5667 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,2,1,4,5,6,7]
5668 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,1,3]
5669 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,0,3,4,5,6,7]
5670 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,4,7]
5671 ; SSE-NEXT: packuswb %xmm6, %xmm7
5672 ; SSE-NEXT: movdqa %xmm1, %xmm2
5673 ; SSE-NEXT: pandn %xmm7, %xmm2
5674 ; SSE-NEXT: pand %xmm1, %xmm8
5675 ; SSE-NEXT: por %xmm8, %xmm2
5676 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5677 ; SSE-NEXT: movdqa (%rsp), %xmm7 # 16-byte Reload
5678 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
5679 ; SSE-NEXT: movdqa %xmm7, (%rsp) # 16-byte Spill
5680 ; SSE-NEXT: pand %xmm10, %xmm7
5681 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm7[2,1,2,3]
5682 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[2,1,2,3,4,5,6,7]
5683 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,7]
5684 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,2]
5685 ; SSE-NEXT: packuswb %xmm8, %xmm7
5686 ; SSE-NEXT: movdqa %xmm15, %xmm8
5687 ; SSE-NEXT: pandn %xmm7, %xmm8
5688 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5689 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
5690 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[3,1,2,0]
5691 ; SSE-NEXT: pand %xmm10, %xmm7
5692 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,6,7]
5693 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
5694 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm7[2,1,0,3,4,5,6,7]
5695 ; SSE-NEXT: packuswb %xmm9, %xmm9
5696 ; SSE-NEXT: pand %xmm15, %xmm9
5697 ; SSE-NEXT: por %xmm8, %xmm9
5698 ; SSE-NEXT: movdqa %xmm11, %xmm2
5699 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5700 ; SSE-NEXT: pand %xmm11, %xmm7
5701 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
5702 ; SSE-NEXT: por %xmm7, %xmm2
5703 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,2,1,3]
5704 ; SSE-NEXT: pand %xmm10, %xmm7
5705 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,2,1,4,5,6,7]
5706 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,1,3]
5707 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,0,3,4,5,6,7]
5708 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,4,7]
5709 ; SSE-NEXT: packuswb %xmm7, %xmm8
5710 ; SSE-NEXT: movdqa %xmm1, %xmm7
5711 ; SSE-NEXT: pandn %xmm8, %xmm7
5712 ; SSE-NEXT: pand %xmm1, %xmm9
5713 ; SSE-NEXT: por %xmm9, %xmm7
5714 ; SSE-NEXT: movdqa %xmm0, %xmm8
5715 ; SSE-NEXT: pxor %xmm5, %xmm5
5716 ; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15]
5717 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
5718 ; SSE-NEXT: movdqa %xmm0, %xmm9
5719 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm8[0,0]
5720 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm8[2,3]
5721 ; SSE-NEXT: psrlq $48, %xmm8
5722 ; SSE-NEXT: psrldq {{.*#+}} xmm9 = xmm9[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
5723 ; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
5724 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[3,1,2,3,4,5,6,7]
5725 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,0,3]
5726 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,5,7]
5727 ; SSE-NEXT: packuswb %xmm9, %xmm8
5728 ; SSE-NEXT: movdqa %xmm15, %xmm10
5729 ; SSE-NEXT: pandn %xmm8, %xmm10
5730 ; SSE-NEXT: movdqa %xmm12, %xmm8
5731 ; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15]
5732 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[1,1,2,3]
5733 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
5734 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,0,65535,65535,65535]
5735 ; SSE-NEXT: movdqa %xmm0, %xmm11
5736 ; SSE-NEXT: pandn %xmm8, %xmm11
5737 ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
5738 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm12[0,1,2,3,7,5,6,7]
5739 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
5740 ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm8[3,1,1,2,4,5,6,7]
5741 ; SSE-NEXT: pand %xmm0, %xmm12
5742 ; SSE-NEXT: por %xmm11, %xmm12
5743 ; SSE-NEXT: packuswb %xmm12, %xmm12
5744 ; SSE-NEXT: pand %xmm15, %xmm12
5745 ; SSE-NEXT: por %xmm10, %xmm12
5746 ; SSE-NEXT: movdqa %xmm13, %xmm8
5747 ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15]
5748 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm13[0,1,2,3,7,5,6,7]
5749 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,2,0]
5750 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,7,4]
5751 ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,65535,0,65535,0,0]
5752 ; SSE-NEXT: movdqa %xmm11, %xmm13
5753 ; SSE-NEXT: pandn %xmm10, %xmm13
5754 ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3],xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
5755 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,1,1]
5756 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
5757 ; SSE-NEXT: pand %xmm11, %xmm8
5758 ; SSE-NEXT: por %xmm8, %xmm13
5759 ; SSE-NEXT: packuswb %xmm13, %xmm10
5760 ; SSE-NEXT: movdqa %xmm1, %xmm8
5761 ; SSE-NEXT: pandn %xmm10, %xmm8
5762 ; SSE-NEXT: pand %xmm1, %xmm12
5763 ; SSE-NEXT: por %xmm12, %xmm8
5764 ; SSE-NEXT: movdqa %xmm14, %xmm9
5765 ; SSE-NEXT: movdqa %xmm14, %xmm10
5766 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15]
5767 ; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3],xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
5768 ; SSE-NEXT: movdqa %xmm9, %xmm12
5769 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm10[0,0]
5770 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm10[2,3]
5771 ; SSE-NEXT: psrlq $48, %xmm10
5772 ; SSE-NEXT: psrldq {{.*#+}} xmm12 = xmm12[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
5773 ; SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
5774 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm9[3,1,2,3,4,5,6,7]
5775 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3]
5776 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,5,7]
5777 ; SSE-NEXT: packuswb %xmm12, %xmm10
5778 ; SSE-NEXT: movdqa %xmm15, %xmm12
5779 ; SSE-NEXT: pandn %xmm10, %xmm12
5780 ; SSE-NEXT: movdqa %xmm4, %xmm10
5781 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15]
5782 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
5783 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
5784 ; SSE-NEXT: movdqa %xmm0, %xmm14
5785 ; SSE-NEXT: pandn %xmm10, %xmm14
5786 ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
5787 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm4[0,1,2,3,7,5,6,7]
5788 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,2,2,3]
5789 ; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm10[3,1,1,2,4,5,6,7]
5790 ; SSE-NEXT: pand %xmm0, %xmm13
5791 ; SSE-NEXT: por %xmm14, %xmm13
5792 ; SSE-NEXT: packuswb %xmm13, %xmm13
5793 ; SSE-NEXT: pand %xmm15, %xmm13
5794 ; SSE-NEXT: por %xmm12, %xmm13
5795 ; SSE-NEXT: movdqa %xmm3, %xmm10
5796 ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
5797 ; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm3[0,1,2,3,7,5,6,7]
5798 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,1,2,0]
5799 ; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,5,7,4]
5800 ; SSE-NEXT: movdqa %xmm11, %xmm14
5801 ; SSE-NEXT: pandn %xmm12, %xmm14
5802 ; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3],xmm10[4],xmm5[4],xmm10[5],xmm5[5],xmm10[6],xmm5[6],xmm10[7],xmm5[7]
5803 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,3,1,1]
5804 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[0,1,1,3,4,5,6,7]
5805 ; SSE-NEXT: pand %xmm11, %xmm10
5806 ; SSE-NEXT: por %xmm10, %xmm14
5807 ; SSE-NEXT: packuswb %xmm14, %xmm10
5808 ; SSE-NEXT: movdqa %xmm1, %xmm12
5809 ; SSE-NEXT: pandn %xmm10, %xmm12
5810 ; SSE-NEXT: pand %xmm1, %xmm13
5811 ; SSE-NEXT: por %xmm13, %xmm12
5812 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
5813 ; SSE-NEXT: movdqa %xmm9, %xmm10
5814 ; SSE-NEXT: pxor %xmm3, %xmm3
5815 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm3[8],xmm10[9],xmm3[9],xmm10[10],xmm3[10],xmm10[11],xmm3[11],xmm10[12],xmm3[12],xmm10[13],xmm3[13],xmm10[14],xmm3[14],xmm10[15],xmm3[15]
5816 ; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm3[0],xmm9[1],xmm3[1],xmm9[2],xmm3[2],xmm9[3],xmm3[3],xmm9[4],xmm3[4],xmm9[5],xmm3[5],xmm9[6],xmm3[6],xmm9[7],xmm3[7]
5817 ; SSE-NEXT: pxor %xmm4, %xmm4
5818 ; SSE-NEXT: movdqa %xmm9, %xmm13
5819 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm10[0,0]
5820 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm10[2,3]
5821 ; SSE-NEXT: psrlq $48, %xmm10
5822 ; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
5823 ; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3]
5824 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm9[3,1,2,3,4,5,6,7]
5825 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3]
5826 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,5,7]
5827 ; SSE-NEXT: packuswb %xmm13, %xmm10
5828 ; SSE-NEXT: movdqa %xmm15, %xmm13
5829 ; SSE-NEXT: pandn %xmm10, %xmm13
5830 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
5831 ; SSE-NEXT: movdqa %xmm3, %xmm10
5832 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
5833 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
5834 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
5835 ; SSE-NEXT: movdqa %xmm0, %xmm9
5836 ; SSE-NEXT: pandn %xmm10, %xmm9
5837 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
5838 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm3[0,1,2,3,7,5,6,7]
5839 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,2,2,3]
5840 ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm10[3,1,1,2,4,5,6,7]
5841 ; SSE-NEXT: pand %xmm0, %xmm14
5842 ; SSE-NEXT: por %xmm9, %xmm14
5843 ; SSE-NEXT: packuswb %xmm14, %xmm14
5844 ; SSE-NEXT: pand %xmm15, %xmm14
5845 ; SSE-NEXT: por %xmm13, %xmm14
5846 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
5847 ; SSE-NEXT: movdqa %xmm3, %xmm9
5848 ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
5849 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm3[0,1,2,3,7,5,6,7]
5850 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,2,0]
5851 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,7,4]
5852 ; SSE-NEXT: movdqa %xmm11, %xmm13
5853 ; SSE-NEXT: pandn %xmm10, %xmm13
5854 ; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1],xmm9[2],xmm4[2],xmm9[3],xmm4[3],xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
5855 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,3,1,1]
5856 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
5857 ; SSE-NEXT: pand %xmm11, %xmm9
5858 ; SSE-NEXT: por %xmm9, %xmm13
5859 ; SSE-NEXT: packuswb %xmm13, %xmm9
5860 ; SSE-NEXT: movdqa %xmm1, %xmm13
5861 ; SSE-NEXT: pandn %xmm9, %xmm13
5862 ; SSE-NEXT: pand %xmm1, %xmm14
5863 ; SSE-NEXT: por %xmm14, %xmm13
5864 ; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
5865 ; SSE-NEXT: movdqa %xmm3, %xmm9
5866 ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
5867 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
5868 ; SSE-NEXT: movdqa %xmm3, %xmm10
5869 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm9[0,0]
5870 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm9[2,3]
5871 ; SSE-NEXT: psrlq $48, %xmm9
5872 ; SSE-NEXT: psrldq {{.*#+}} xmm10 = xmm10[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
5873 ; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
5874 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm3[3,1,2,3,4,5,6,7]
5875 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,0,3]
5876 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,5,7]
5877 ; SSE-NEXT: packuswb %xmm10, %xmm9
5878 ; SSE-NEXT: movdqa %xmm6, %xmm10
5879 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15]
5880 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
5881 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
5882 ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
5883 ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm6[0,1,2,3,7,5,6,7]
5884 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[0,2,2,3]
5885 ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm14[3,1,1,2,4,5,6,7]
5886 ; SSE-NEXT: pand %xmm0, %xmm14
5887 ; SSE-NEXT: pandn %xmm10, %xmm0
5888 ; SSE-NEXT: por %xmm14, %xmm0
5889 ; SSE-NEXT: packuswb %xmm0, %xmm0
5890 ; SSE-NEXT: pand %xmm15, %xmm0
5891 ; SSE-NEXT: pandn %xmm9, %xmm15
5892 ; SSE-NEXT: por %xmm15, %xmm0
5893 ; SSE-NEXT: movdqa %xmm2, %xmm4
5894 ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
5895 ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
5896 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,1,1]
5897 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
5898 ; SSE-NEXT: pand %xmm11, %xmm4
5899 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,7,5,6,7]
5900 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
5901 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,7,4]
5902 ; SSE-NEXT: pandn %xmm5, %xmm11
5903 ; SSE-NEXT: por %xmm4, %xmm11
5904 ; SSE-NEXT: pand %xmm1, %xmm0
5905 ; SSE-NEXT: packuswb %xmm11, %xmm4
5906 ; SSE-NEXT: pandn %xmm4, %xmm1
5907 ; SSE-NEXT: por %xmm0, %xmm1
5908 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5909 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
5910 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5911 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
5912 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5913 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
5914 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5915 ; SSE-NEXT: movaps %xmm0, (%rsi)
5916 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5917 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
5918 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5919 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
5920 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5921 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
5922 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5923 ; SSE-NEXT: movaps %xmm0, (%rdx)
5924 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5925 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
5926 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5927 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
5928 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5929 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
5930 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5931 ; SSE-NEXT: movaps %xmm0, (%rcx)
5932 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5933 ; SSE-NEXT: movaps %xmm0, 16(%r8)
5934 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5935 ; SSE-NEXT: movaps %xmm0, 32(%r8)
5936 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5937 ; SSE-NEXT: movaps %xmm0, 48(%r8)
5938 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5939 ; SSE-NEXT: movaps %xmm0, (%r8)
5940 ; SSE-NEXT: movdqa %xmm7, 16(%r9)
5941 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5942 ; SSE-NEXT: movaps %xmm0, 32(%r9)
5943 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5944 ; SSE-NEXT: movaps %xmm0, 48(%r9)
5945 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5946 ; SSE-NEXT: movaps %xmm0, (%r9)
5947 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
5948 ; SSE-NEXT: movdqa %xmm1, 16(%rax)
5949 ; SSE-NEXT: movdqa %xmm13, 32(%rax)
5950 ; SSE-NEXT: movdqa %xmm12, 48(%rax)
5951 ; SSE-NEXT: movdqa %xmm8, (%rax)
5952 ; SSE-NEXT: addq $792, %rsp # imm = 0x318
5955 ; AVX-LABEL: load_i8_stride6_vf64:
5957 ; AVX-NEXT: subq $616, %rsp # imm = 0x268
5958 ; AVX-NEXT: vmovdqa (%rdi), %xmm2
5959 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm7
5960 ; AVX-NEXT: vmovdqa 32(%rdi), %xmm5
5961 ; AVX-NEXT: vmovdqa 48(%rdi), %xmm6
5962 ; AVX-NEXT: vmovdqa 224(%rdi), %xmm8
5963 ; AVX-NEXT: vmovdqa 240(%rdi), %xmm9
5964 ; AVX-NEXT: vmovdqa 208(%rdi), %xmm10
5965 ; AVX-NEXT: vmovdqa 192(%rdi), %xmm11
5966 ; AVX-NEXT: vmovq {{.*#+}} xmm3 = [128,128,128,4,10,0,0,0,0,0,0,0,0,0,0,0]
5967 ; AVX-NEXT: vmovq {{.*#+}} xmm12 = [2,8,14,128,128,0,0,0,0,0,0,0,0,0,0,0]
5968 ; AVX-NEXT: vmovd {{.*#+}} xmm1 = [2,8,14,0,0,0,0,0,0,0,0,0,0,0,0,0]
5969 ; AVX-NEXT: vpshufb %xmm1, %xmm6, %xmm0
5970 ; AVX-NEXT: vmovdqa %xmm1, %xmm13
5971 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm4 = [0,0,6,12,0,0,6,12,0,0,6,12,0,0,6,12]
5972 ; AVX-NEXT: vpshufb %xmm4, %xmm5, %xmm1
5973 ; AVX-NEXT: vmovdqa %xmm5, %xmm15
5974 ; AVX-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5975 ; AVX-NEXT: vmovdqa %xmm4, %xmm14
5976 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
5977 ; AVX-NEXT: vpshufb %xmm3, %xmm7, %xmm0
5978 ; AVX-NEXT: vmovdqa %xmm2, %xmm4
5979 ; AVX-NEXT: vpshufb %xmm12, %xmm2, %xmm2
5980 ; AVX-NEXT: vpor %xmm0, %xmm2, %xmm2
5981 ; AVX-NEXT: vmovdqa {{.*#+}} xmm5 = [0,0,0,0,0,255,255,255,255,255,255,u,u,u,u,u]
5982 ; AVX-NEXT: vpblendvb %xmm5, %xmm1, %xmm2, %xmm1
5983 ; AVX-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5984 ; AVX-NEXT: vpshufb %xmm13, %xmm9, %xmm1
5985 ; AVX-NEXT: vpshufb %xmm14, %xmm8, %xmm2
5986 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
5987 ; AVX-NEXT: vpshufb %xmm3, %xmm10, %xmm2
5988 ; AVX-NEXT: vpshufb %xmm12, %xmm11, %xmm3
5989 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
5990 ; AVX-NEXT: vpblendvb %xmm5, %xmm1, %xmm2, %xmm1
5991 ; AVX-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
5992 ; AVX-NEXT: vmovd {{.*#+}} xmm2 = [3,9,15,0,0,0,0,0,0,0,0,0,0,0,0,0]
5993 ; AVX-NEXT: vpshufb %xmm2, %xmm6, %xmm1
5994 ; AVX-NEXT: vmovdqa %xmm2, %xmm13
5995 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm0 = [0,1,7,13,0,1,7,13,0,1,7,13,0,1,7,13]
5996 ; AVX-NEXT: vpshufb %xmm0, %xmm15, %xmm2
5997 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
5998 ; AVX-NEXT: vmovq {{.*#+}} xmm3 = [128,128,128,5,11,0,0,0,0,0,0,0,0,0,0,0]
5999 ; AVX-NEXT: vmovq {{.*#+}} xmm12 = [3,9,15,128,128,0,0,0,0,0,0,0,0,0,0,0]
6000 ; AVX-NEXT: vpshufb %xmm3, %xmm7, %xmm2
6001 ; AVX-NEXT: vmovdqa %xmm7, %xmm14
6002 ; AVX-NEXT: vmovdqa %xmm3, %xmm15
6003 ; AVX-NEXT: vpshufb %xmm12, %xmm4, %xmm3
6004 ; AVX-NEXT: vmovdqa %xmm4, %xmm7
6005 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
6006 ; AVX-NEXT: vpblendvb %xmm5, %xmm1, %xmm2, %xmm1
6007 ; AVX-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6008 ; AVX-NEXT: vpshufb %xmm13, %xmm9, %xmm1
6009 ; AVX-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6010 ; AVX-NEXT: vpshufb %xmm0, %xmm8, %xmm2
6011 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
6012 ; AVX-NEXT: vpshufb %xmm15, %xmm10, %xmm2
6013 ; AVX-NEXT: vpshufb %xmm12, %xmm11, %xmm3
6014 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
6015 ; AVX-NEXT: vpblendvb %xmm5, %xmm1, %xmm2, %xmm0
6016 ; AVX-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6017 ; AVX-NEXT: vmovq {{.*#+}} xmm1 = [4,10,128,128,128,0,0,0,0,0,0,0,0,0,0,0]
6018 ; AVX-NEXT: vpshufb %xmm1, %xmm4, %xmm0
6019 ; AVX-NEXT: vmovdqa %xmm1, %xmm12
6020 ; AVX-NEXT: vmovq {{.*#+}} xmm2 = [128,128,0,6,12,0,0,0,0,0,0,0,0,0,0,0]
6021 ; AVX-NEXT: vpshufb %xmm2, %xmm14, %xmm1
6022 ; AVX-NEXT: vmovdqa %xmm2, %xmm13
6023 ; AVX-NEXT: vpor %xmm0, %xmm1, %xmm1
6024 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [4,10,0,0,4,10,0,0,4,10,0,0,4,10,0,0]
6025 ; AVX-NEXT: vpshufb %xmm2, %xmm6, %xmm0
6026 ; AVX-NEXT: vmovdqa %xmm2, %xmm4
6027 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm3 = [0,2,8,14,0,2,8,14,0,2,8,14,0,2,8,14]
6028 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6029 ; AVX-NEXT: vpshufb %xmm3, %xmm5, %xmm2
6030 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1]
6031 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = [255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
6032 ; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1
6033 ; AVX-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6034 ; AVX-NEXT: vpshufb %xmm12, %xmm11, %xmm1
6035 ; AVX-NEXT: vpshufb %xmm13, %xmm10, %xmm2
6036 ; AVX-NEXT: vmovdqa %xmm10, %xmm12
6037 ; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
6038 ; AVX-NEXT: vpshufb %xmm4, %xmm9, %xmm2
6039 ; AVX-NEXT: vmovdqa %xmm9, %xmm15
6040 ; AVX-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6041 ; AVX-NEXT: vpshufb %xmm3, %xmm8, %xmm3
6042 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1]
6043 ; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1
6044 ; AVX-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6045 ; AVX-NEXT: vmovq {{.*#+}} xmm2 = [5,11,128,128,128,0,0,0,0,0,0,0,0,0,0,0]
6046 ; AVX-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6047 ; AVX-NEXT: vpshufb %xmm2, %xmm7, %xmm1
6048 ; AVX-NEXT: vmovdqa %xmm2, %xmm8
6049 ; AVX-NEXT: vmovq {{.*#+}} xmm3 = [128,128,1,7,13,0,0,0,0,0,0,0,0,0,0,0]
6050 ; AVX-NEXT: vmovdqa %xmm14, %xmm13
6051 ; AVX-NEXT: vpshufb %xmm3, %xmm14, %xmm2
6052 ; AVX-NEXT: vmovdqa %xmm3, %xmm14
6053 ; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
6054 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm3 = [5,11,0,0,5,11,0,0,5,11,0,0,5,11,0,0]
6055 ; AVX-NEXT: vpshufb %xmm3, %xmm6, %xmm2
6056 ; AVX-NEXT: vmovdqa %xmm3, %xmm9
6057 ; AVX-NEXT: vmovdqa %xmm6, %xmm10
6058 ; AVX-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6059 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm4 = [0,3,9,15,0,3,9,15,0,3,9,15,0,3,9,15]
6060 ; AVX-NEXT: vpshufb %xmm4, %xmm5, %xmm3
6061 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1]
6062 ; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1
6063 ; AVX-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6064 ; AVX-NEXT: vpshufb %xmm8, %xmm11, %xmm1
6065 ; AVX-NEXT: vmovdqa %xmm11, %xmm8
6066 ; AVX-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6067 ; AVX-NEXT: vpshufb %xmm14, %xmm12, %xmm2
6068 ; AVX-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6069 ; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
6070 ; AVX-NEXT: vpshufb %xmm9, %xmm15, %xmm2
6071 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6072 ; AVX-NEXT: vpshufb %xmm4, %xmm6, %xmm3
6073 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1]
6074 ; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0
6075 ; AVX-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6076 ; AVX-NEXT: vmovq {{.*#+}} xmm3 = [128,128,128,2,8,14,0,0,0,0,0,0,0,0,0,0]
6077 ; AVX-NEXT: vmovdqa 112(%rdi), %xmm0
6078 ; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6079 ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0
6080 ; AVX-NEXT: vmovq {{.*#+}} xmm15 = [0,6,12,128,128,128,0,0,0,0,0,0,0,0,0,0]
6081 ; AVX-NEXT: vmovdqa 96(%rdi), %xmm1
6082 ; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6083 ; AVX-NEXT: vpshufb %xmm15, %xmm1, %xmm1
6084 ; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
6085 ; AVX-NEXT: vmovddup {{.*#+}} xmm4 = [0,0,0,128,128,128,4,10,0,0,0,128,128,128,4,10]
6086 ; AVX-NEXT: # xmm4 = mem[0,0]
6087 ; AVX-NEXT: vmovdqa 80(%rdi), %xmm1
6088 ; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6089 ; AVX-NEXT: vpshufb %xmm4, %xmm1, %xmm1
6090 ; AVX-NEXT: vmovdqa %xmm4, %xmm11
6091 ; AVX-NEXT: vmovdqa 64(%rdi), %xmm2
6092 ; AVX-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6093 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
6094 ; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
6095 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm2
6096 ; AVX-NEXT: vmovd {{.*#+}} xmm14 = [0,0,4,10,0,0,0,0,0,0,0,0,0,0,0,0]
6097 ; AVX-NEXT: vpshufb %xmm14, %xmm5, %xmm4
6098 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [0,6,12,0,0,6,12,0,0,6,12,0,0,6,12,0]
6099 ; AVX-NEXT: vpshufb %xmm1, %xmm10, %xmm5
6100 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
6101 ; AVX-NEXT: vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6102 ; AVX-NEXT: vpshufb %xmm3, %xmm13, %xmm5
6103 ; AVX-NEXT: vpshufb %xmm15, %xmm7, %xmm9
6104 ; AVX-NEXT: vpor %xmm5, %xmm9, %xmm5
6105 ; AVX-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3,4,5],xmm5[6,7]
6106 ; AVX-NEXT: vmovaps {{.*#+}} ymm13 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
6107 ; AVX-NEXT: vandnps %ymm2, %ymm13, %ymm2
6108 ; AVX-NEXT: vandps %ymm4, %ymm13, %ymm4
6109 ; AVX-NEXT: vorps %ymm2, %ymm4, %ymm9
6110 ; AVX-NEXT: vmovdqa 128(%rdi), %xmm2
6111 ; AVX-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6112 ; AVX-NEXT: vpshufb %xmm14, %xmm2, %xmm2
6113 ; AVX-NEXT: vmovdqa 144(%rdi), %xmm4
6114 ; AVX-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6115 ; AVX-NEXT: vpshufb %xmm1, %xmm4, %xmm4
6116 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
6117 ; AVX-NEXT: vmovdqa 176(%rdi), %xmm2
6118 ; AVX-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6119 ; AVX-NEXT: vpshufb %xmm11, %xmm2, %xmm2
6120 ; AVX-NEXT: vmovdqa 160(%rdi), %xmm5
6121 ; AVX-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6122 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = [0,0,0,2,8,14,128,128,0,0,0,2,8,14,128,128]
6123 ; AVX-NEXT: # xmm0 = mem[0,0]
6124 ; AVX-NEXT: vpshufb %xmm0, %xmm5, %xmm10
6125 ; AVX-NEXT: vpor %xmm2, %xmm10, %xmm10
6126 ; AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
6127 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm10, %xmm10
6128 ; AVX-NEXT: vmovdqa %ymm2, %ymm5
6129 ; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0]
6130 ; AVX-NEXT: vandps %ymm2, %ymm9, %ymm9
6131 ; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
6132 ; AVX-NEXT: vandnps %ymm10, %ymm2, %ymm10
6133 ; AVX-NEXT: vmovaps %ymm2, %ymm7
6134 ; AVX-NEXT: vorps %ymm10, %ymm9, %ymm4
6135 ; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6136 ; AVX-NEXT: vmovdqa 304(%rdi), %xmm2
6137 ; AVX-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6138 ; AVX-NEXT: vpshufb %xmm3, %xmm2, %xmm9
6139 ; AVX-NEXT: vmovdqa 288(%rdi), %xmm2
6140 ; AVX-NEXT: vmovdqa %xmm2, (%rsp) # 16-byte Spill
6141 ; AVX-NEXT: vpshufb %xmm15, %xmm2, %xmm10
6142 ; AVX-NEXT: vpor %xmm9, %xmm10, %xmm9
6143 ; AVX-NEXT: vmovdqa 272(%rdi), %xmm2
6144 ; AVX-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6145 ; AVX-NEXT: vpshufb %xmm11, %xmm2, %xmm10
6146 ; AVX-NEXT: vmovdqa 256(%rdi), %xmm2
6147 ; AVX-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6148 ; AVX-NEXT: vpshufb %xmm0, %xmm2, %xmm11
6149 ; AVX-NEXT: vpor %xmm10, %xmm11, %xmm10
6150 ; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm10, %ymm9
6151 ; AVX-NEXT: vpshufb %xmm14, %xmm6, %xmm10
6152 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6153 ; AVX-NEXT: vpshufb %xmm1, %xmm2, %xmm11
6154 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
6155 ; AVX-NEXT: vpshufb %xmm3, %xmm12, %xmm3
6156 ; AVX-NEXT: vpshufb %xmm15, %xmm8, %xmm6
6157 ; AVX-NEXT: vpor %xmm3, %xmm6, %xmm3
6158 ; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm10[3,4,5],xmm3[6,7]
6159 ; AVX-NEXT: vandnps %ymm9, %ymm13, %ymm6
6160 ; AVX-NEXT: vandps %ymm3, %ymm13, %ymm3
6161 ; AVX-NEXT: vmovaps %ymm13, %ymm11
6162 ; AVX-NEXT: vorps %ymm6, %ymm3, %ymm3
6163 ; AVX-NEXT: vmovdqa 320(%rdi), %xmm4
6164 ; AVX-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6165 ; AVX-NEXT: vpshufb %xmm14, %xmm4, %xmm0
6166 ; AVX-NEXT: vmovdqa 336(%rdi), %xmm4
6167 ; AVX-NEXT: vpshufb %xmm1, %xmm4, %xmm1
6168 ; AVX-NEXT: vmovdqa %xmm4, %xmm10
6169 ; AVX-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6170 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
6171 ; AVX-NEXT: vmovdqa 368(%rdi), %xmm1
6172 ; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6173 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm1[4,10]
6174 ; AVX-NEXT: vmovdqa 352(%rdi), %xmm4
6175 ; AVX-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6176 ; AVX-NEXT: vpshufb {{.*#+}} xmm6 = xmm4[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
6177 ; AVX-NEXT: vpor %xmm1, %xmm6, %xmm1
6178 ; AVX-NEXT: vpblendvb %xmm5, %xmm0, %xmm1, %xmm0
6179 ; AVX-NEXT: vandps %ymm7, %ymm3, %ymm1
6180 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
6181 ; AVX-NEXT: vandnps %ymm0, %ymm7, %ymm0
6182 ; AVX-NEXT: vorps %ymm0, %ymm1, %ymm0
6183 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6184 ; AVX-NEXT: vmovq {{.*#+}} xmm8 = [128,128,128,3,9,15,0,0,0,0,0,0,0,0,0,0]
6185 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6186 ; AVX-NEXT: vpshufb %xmm8, %xmm0, %xmm0
6187 ; AVX-NEXT: vmovq {{.*#+}} xmm4 = [1,7,13,128,128,128,0,0,0,0,0,0,0,0,0,0]
6188 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6189 ; AVX-NEXT: vpshufb %xmm4, %xmm1, %xmm1
6190 ; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
6191 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6192 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm1[5,11]
6193 ; AVX-NEXT: vmovddup {{.*#+}} xmm9 = [0,0,0,3,9,15,128,128,0,0,0,3,9,15,128,128]
6194 ; AVX-NEXT: # xmm9 = mem[0,0]
6195 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6196 ; AVX-NEXT: vpshufb %xmm9, %xmm5, %xmm14
6197 ; AVX-NEXT: vpor %xmm1, %xmm14, %xmm1
6198 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm14
6199 ; AVX-NEXT: vmovd {{.*#+}} xmm3 = [0,0,5,11,0,0,0,0,0,0,0,0,0,0,0,0]
6200 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6201 ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0
6202 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm7 = [1,7,13,0,1,7,13,0,1,7,13,0,1,7,13,0]
6203 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6204 ; AVX-NEXT: vpshufb %xmm7, %xmm6, %xmm15
6205 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
6206 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6207 ; AVX-NEXT: vpshufb %xmm8, %xmm6, %xmm15
6208 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6209 ; AVX-NEXT: vpshufb %xmm4, %xmm6, %xmm13
6210 ; AVX-NEXT: vpor %xmm15, %xmm13, %xmm13
6211 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm13[0,1,2],xmm0[3,4,5],xmm13[6,7]
6212 ; AVX-NEXT: vandnps %ymm14, %ymm11, %ymm13
6213 ; AVX-NEXT: vandps %ymm0, %ymm11, %ymm0
6214 ; AVX-NEXT: vorps %ymm0, %ymm13, %ymm0
6215 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
6216 ; AVX-NEXT: vpshufb %xmm3, %xmm12, %xmm13
6217 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
6218 ; AVX-NEXT: vpshufb %xmm7, %xmm11, %xmm14
6219 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
6220 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6221 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [0,0,0,128,128,128,5,11,0,0,0,128,128,128,5,11]
6222 ; AVX-NEXT: # xmm1 = mem[0,0]
6223 ; AVX-NEXT: vpshufb %xmm1, %xmm6, %xmm14
6224 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
6225 ; AVX-NEXT: vpshufb %xmm9, %xmm15, %xmm15
6226 ; AVX-NEXT: vpor %xmm14, %xmm15, %xmm14
6227 ; AVX-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
6228 ; AVX-NEXT: vpblendvb %xmm15, %xmm13, %xmm14, %xmm13
6229 ; AVX-NEXT: vmovaps {{.*#+}} ymm15 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0]
6230 ; AVX-NEXT: vandps %ymm0, %ymm15, %ymm0
6231 ; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
6232 ; AVX-NEXT: vandnps %ymm13, %ymm15, %ymm13
6233 ; AVX-NEXT: vorps %ymm0, %ymm13, %ymm0
6234 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6235 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6236 ; AVX-NEXT: vpshufb %xmm8, %xmm0, %xmm0
6237 ; AVX-NEXT: vmovdqa (%rsp), %xmm13 # 16-byte Reload
6238 ; AVX-NEXT: vpshufb %xmm4, %xmm13, %xmm13
6239 ; AVX-NEXT: vpor %xmm0, %xmm13, %xmm0
6240 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
6241 ; AVX-NEXT: vpshufb %xmm1, %xmm13, %xmm13
6242 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
6243 ; AVX-NEXT: vpshufb %xmm9, %xmm14, %xmm14
6244 ; AVX-NEXT: vpor %xmm13, %xmm14, %xmm13
6245 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm13, %ymm0
6246 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
6247 ; AVX-NEXT: vpshufb %xmm3, %xmm13, %xmm13
6248 ; AVX-NEXT: vpshufb %xmm7, %xmm2, %xmm14
6249 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
6250 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6251 ; AVX-NEXT: vpshufb %xmm8, %xmm2, %xmm8
6252 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6253 ; AVX-NEXT: vpshufb %xmm4, %xmm2, %xmm4
6254 ; AVX-NEXT: vpor %xmm4, %xmm8, %xmm4
6255 ; AVX-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm13[3,4,5],xmm4[6,7]
6256 ; AVX-NEXT: vmovaps {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
6257 ; AVX-NEXT: vandnps %ymm0, %ymm1, %ymm0
6258 ; AVX-NEXT: vandps %ymm1, %ymm4, %ymm4
6259 ; AVX-NEXT: vorps %ymm0, %ymm4, %ymm0
6260 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6261 ; AVX-NEXT: vpshufb %xmm3, %xmm2, %xmm3
6262 ; AVX-NEXT: vpshufb %xmm7, %xmm10, %xmm1
6263 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
6264 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
6265 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm14[5,11]
6266 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6267 ; AVX-NEXT: vpshufb %xmm9, %xmm4, %xmm4
6268 ; AVX-NEXT: vpor %xmm3, %xmm4, %xmm3
6269 ; AVX-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
6270 ; AVX-NEXT: vpblendvb %xmm8, %xmm1, %xmm3, %xmm1
6271 ; AVX-NEXT: vandps %ymm0, %ymm15, %ymm0
6272 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
6273 ; AVX-NEXT: vandnps %ymm1, %ymm15, %ymm1
6274 ; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
6275 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6276 ; AVX-NEXT: vmovq {{.*#+}} xmm9 = [128,128,128,4,10,0,0,0,0,0,0,0,0,0,0,0]
6277 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6278 ; AVX-NEXT: vpshufb %xmm9, %xmm10, %xmm0
6279 ; AVX-NEXT: vmovq {{.*#+}} xmm13 = [2,8,14,128,128,0,0,0,0,0,0,0,0,0,0,0]
6280 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6281 ; AVX-NEXT: vpshufb %xmm13, %xmm1, %xmm1
6282 ; AVX-NEXT: vpor %xmm0, %xmm1, %xmm1
6283 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = [0,0,0,4,10,128,128,128,0,0,0,4,10,128,128,128]
6284 ; AVX-NEXT: # xmm0 = mem[0,0]
6285 ; AVX-NEXT: vpshufb %xmm0, %xmm5, %xmm4
6286 ; AVX-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,128,128,0,6,12,0,0,0,128,128,0,6,12]
6287 ; AVX-NEXT: # xmm3 = mem[0,0]
6288 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6289 ; AVX-NEXT: vpshufb %xmm3, %xmm5, %xmm5
6290 ; AVX-NEXT: vpor %xmm4, %xmm5, %xmm4
6291 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1
6292 ; AVX-NEXT: vmovdqa %ymm8, %ymm9
6293 ; AVX-NEXT: vandnps %ymm1, %ymm8, %ymm1
6294 ; AVX-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm4 # 32-byte Folded Reload
6295 ; AVX-NEXT: vorps %ymm1, %ymm4, %ymm4
6296 ; AVX-NEXT: vmovd {{.*#+}} xmm13 = [2,8,14,0,0,0,0,0,0,0,0,0,0,0,0,0]
6297 ; AVX-NEXT: vpshufb %xmm13, %xmm11, %xmm1
6298 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm8 = [0,0,6,12,0,0,6,12,0,0,6,12,0,0,6,12]
6299 ; AVX-NEXT: vpshufb %xmm8, %xmm12, %xmm5
6300 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
6301 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
6302 ; AVX-NEXT: vpshufb %xmm0, %xmm7, %xmm5
6303 ; AVX-NEXT: vmovdqa %xmm6, %xmm15
6304 ; AVX-NEXT: vpshufb %xmm3, %xmm6, %xmm6
6305 ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5
6306 ; AVX-NEXT: vpblendvb %xmm9, %xmm1, %xmm5, %xmm5
6307 ; AVX-NEXT: vmovaps {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
6308 ; AVX-NEXT: vandps %ymm1, %ymm4, %ymm4
6309 ; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
6310 ; AVX-NEXT: vandnps %ymm5, %ymm1, %ymm5
6311 ; AVX-NEXT: vorps %ymm5, %ymm4, %ymm1
6312 ; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6313 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
6314 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm12[4,10,u,u,u,u,u,u,u,u,u,u,u]
6315 ; AVX-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
6316 ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
6317 ; AVX-NEXT: vpor %xmm4, %xmm5, %xmm4
6318 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
6319 ; AVX-NEXT: vpshufb %xmm0, %xmm11, %xmm5
6320 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6321 ; AVX-NEXT: vpshufb %xmm3, %xmm6, %xmm6
6322 ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5
6323 ; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
6324 ; AVX-NEXT: vandnps %ymm4, %ymm9, %ymm4
6325 ; AVX-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm5 # 32-byte Folded Reload
6326 ; AVX-NEXT: vorps %ymm4, %ymm5, %ymm4
6327 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6328 ; AVX-NEXT: vpshufb %xmm13, %xmm1, %xmm5
6329 ; AVX-NEXT: vpshufb %xmm8, %xmm2, %xmm6
6330 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm6[0],xmm5[0]
6331 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6332 ; AVX-NEXT: vpshufb %xmm0, %xmm2, %xmm0
6333 ; AVX-NEXT: vpshufb %xmm3, %xmm14, %xmm3
6334 ; AVX-NEXT: vpor %xmm0, %xmm3, %xmm0
6335 ; AVX-NEXT: vpblendvb %xmm9, %xmm5, %xmm0, %xmm0
6336 ; AVX-NEXT: vmovaps {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
6337 ; AVX-NEXT: vandps %ymm5, %ymm4, %ymm3
6338 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
6339 ; AVX-NEXT: vandnps %ymm0, %ymm5, %ymm0
6340 ; AVX-NEXT: vorps %ymm0, %ymm3, %ymm0
6341 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6342 ; AVX-NEXT: vmovq {{.*#+}} xmm14 = [128,128,128,5,11,0,0,0,0,0,0,0,0,0,0,0]
6343 ; AVX-NEXT: vpshufb %xmm14, %xmm10, %xmm0
6344 ; AVX-NEXT: vmovq {{.*#+}} xmm13 = [3,9,15,128,128,0,0,0,0,0,0,0,0,0,0,0]
6345 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
6346 ; AVX-NEXT: vpshufb %xmm13, %xmm14, %xmm3
6347 ; AVX-NEXT: vpor %xmm0, %xmm3, %xmm0
6348 ; AVX-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,5,11,128,128,128,0,0,0,5,11,128,128,128]
6349 ; AVX-NEXT: # xmm3 = mem[0,0]
6350 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6351 ; AVX-NEXT: vpshufb %xmm3, %xmm4, %xmm4
6352 ; AVX-NEXT: vmovddup {{.*#+}} xmm5 = [0,0,0,128,128,1,7,13,0,0,0,128,128,1,7,13]
6353 ; AVX-NEXT: # xmm5 = mem[0,0]
6354 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6355 ; AVX-NEXT: vpshufb %xmm5, %xmm10, %xmm6
6356 ; AVX-NEXT: vpor %xmm4, %xmm6, %xmm4
6357 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
6358 ; AVX-NEXT: vmovdqa %ymm9, %ymm13
6359 ; AVX-NEXT: vandnps %ymm0, %ymm9, %ymm0
6360 ; AVX-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm4 # 32-byte Folded Reload
6361 ; AVX-NEXT: vorps %ymm0, %ymm4, %ymm0
6362 ; AVX-NEXT: vmovd {{.*#+}} xmm8 = [3,9,15,0,0,0,0,0,0,0,0,0,0,0,0,0]
6363 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6364 ; AVX-NEXT: vpshufb %xmm8, %xmm4, %xmm4
6365 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm9 = [0,1,7,13,0,1,7,13,0,1,7,13,0,1,7,13]
6366 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6367 ; AVX-NEXT: vpshufb %xmm9, %xmm6, %xmm6
6368 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
6369 ; AVX-NEXT: vpshufb %xmm3, %xmm7, %xmm6
6370 ; AVX-NEXT: vpshufb %xmm5, %xmm15, %xmm7
6371 ; AVX-NEXT: vmovdqa %xmm15, %xmm8
6372 ; AVX-NEXT: vpor %xmm6, %xmm7, %xmm6
6373 ; AVX-NEXT: vpblendvb %xmm13, %xmm4, %xmm6, %xmm4
6374 ; AVX-NEXT: vmovaps {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
6375 ; AVX-NEXT: vandps %ymm6, %ymm0, %ymm0
6376 ; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
6377 ; AVX-NEXT: vandnps %ymm4, %ymm6, %ymm4
6378 ; AVX-NEXT: vmovaps %ymm6, %ymm7
6379 ; AVX-NEXT: vorps %ymm4, %ymm0, %ymm0
6380 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6381 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,xmm12[5,11,u,u,u,u,u,u,u,u,u,u,u]
6382 ; AVX-NEXT: vmovdqa (%rsp), %xmm12 # 16-byte Reload
6383 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm12[3,9,15],zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
6384 ; AVX-NEXT: vpor %xmm0, %xmm4, %xmm0
6385 ; AVX-NEXT: vpshufb %xmm3, %xmm11, %xmm4
6386 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
6387 ; AVX-NEXT: vpshufb %xmm5, %xmm15, %xmm6
6388 ; AVX-NEXT: vpor %xmm4, %xmm6, %xmm4
6389 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
6390 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm1[3,9,15,u,u,u,u,u,u,u,u,u,u,u,u,u]
6391 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6392 ; AVX-NEXT: vpshufb %xmm9, %xmm1, %xmm6
6393 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
6394 ; AVX-NEXT: vpshufb %xmm3, %xmm2, %xmm3
6395 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
6396 ; AVX-NEXT: vpshufb %xmm5, %xmm11, %xmm5
6397 ; AVX-NEXT: vpor %xmm3, %xmm5, %xmm3
6398 ; AVX-NEXT: vpblendvb %xmm13, %xmm4, %xmm3, %xmm3
6399 ; AVX-NEXT: vandnps %ymm0, %ymm13, %ymm0
6400 ; AVX-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm2 # 32-byte Folded Reload
6401 ; AVX-NEXT: vorps %ymm0, %ymm2, %ymm0
6402 ; AVX-NEXT: vandps %ymm7, %ymm0, %ymm0
6403 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm2
6404 ; AVX-NEXT: vandnps %ymm2, %ymm7, %ymm2
6405 ; AVX-NEXT: vorps %ymm2, %ymm0, %ymm0
6406 ; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6407 ; AVX-NEXT: vmovq {{.*#+}} xmm1 = [4,10,128,128,128,0,0,0,0,0,0,0,0,0,0,0]
6408 ; AVX-NEXT: vpshufb %xmm1, %xmm14, %xmm0
6409 ; AVX-NEXT: vmovq {{.*#+}} xmm14 = [128,128,0,6,12,0,0,0,0,0,0,0,0,0,0,0]
6410 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
6411 ; AVX-NEXT: vpshufb %xmm14, %xmm13, %xmm2
6412 ; AVX-NEXT: vpor %xmm0, %xmm2, %xmm0
6413 ; AVX-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,128,128,128,2,8,14,0,0,128,128,128,2,8,14]
6414 ; AVX-NEXT: # xmm3 = mem[0,0]
6415 ; AVX-NEXT: vpshufb %xmm3, %xmm10, %xmm2
6416 ; AVX-NEXT: vmovddup {{.*#+}} xmm4 = [0,0,0,6,12,128,128,128,0,0,0,6,12,128,128,128]
6417 ; AVX-NEXT: # xmm4 = mem[0,0]
6418 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6419 ; AVX-NEXT: vpshufb %xmm4, %xmm5, %xmm5
6420 ; AVX-NEXT: vpor %xmm2, %xmm5, %xmm2
6421 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm2
6422 ; AVX-NEXT: vmovaps {{.*#+}} ymm10 = [0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
6423 ; AVX-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm5 # 32-byte Folded Reload
6424 ; AVX-NEXT: vandps %ymm2, %ymm10, %ymm2
6425 ; AVX-NEXT: vorps %ymm5, %ymm2, %ymm2
6426 ; AVX-NEXT: vpshufb %xmm3, %xmm8, %xmm5
6427 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6428 ; AVX-NEXT: vpshufb %xmm4, %xmm0, %xmm6
6429 ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5
6430 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm8 = [4,10,0,0,4,10,0,0,4,10,0,0,4,10,0,0]
6431 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
6432 ; AVX-NEXT: vpshufb %xmm8, %xmm14, %xmm6
6433 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm9 = [0,2,8,14,0,2,8,14,0,2,8,14,0,2,8,14]
6434 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6435 ; AVX-NEXT: vpshufb %xmm9, %xmm0, %xmm7
6436 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm7[1],xmm6[1]
6437 ; AVX-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3,4],xmm5[5,6,7]
6438 ; AVX-NEXT: vmovaps {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
6439 ; AVX-NEXT: vandps %ymm6, %ymm2, %ymm2
6440 ; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
6441 ; AVX-NEXT: vandnps %ymm5, %ymm6, %ymm5
6442 ; AVX-NEXT: vmovaps %ymm6, %ymm0
6443 ; AVX-NEXT: vorps %ymm5, %ymm2, %ymm2
6444 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6445 ; AVX-NEXT: vpshufb %xmm1, %xmm12, %xmm5
6446 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6447 ; AVX-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm1[0,6,12,u,u,u,u,u,u,u,u,u,u,u]
6448 ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5
6449 ; AVX-NEXT: vpshufb %xmm3, %xmm15, %xmm6
6450 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6451 ; AVX-NEXT: vpshufb %xmm4, %xmm1, %xmm7
6452 ; AVX-NEXT: vpor %xmm6, %xmm7, %xmm6
6453 ; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
6454 ; AVX-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm6 # 32-byte Folded Reload
6455 ; AVX-NEXT: vandps %ymm5, %ymm10, %ymm5
6456 ; AVX-NEXT: vorps %ymm6, %ymm5, %ymm5
6457 ; AVX-NEXT: vpshufb %xmm3, %xmm11, %xmm3
6458 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
6459 ; AVX-NEXT: vpshufb %xmm4, %xmm11, %xmm4
6460 ; AVX-NEXT: vpor %xmm3, %xmm4, %xmm3
6461 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
6462 ; AVX-NEXT: vpshufb %xmm8, %xmm12, %xmm4
6463 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6464 ; AVX-NEXT: vpshufb %xmm9, %xmm1, %xmm6
6465 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm6[1],xmm4[1]
6466 ; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3,4],xmm3[5,6,7]
6467 ; AVX-NEXT: vandps %ymm0, %ymm5, %ymm4
6468 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
6469 ; AVX-NEXT: vandnps %ymm3, %ymm0, %ymm3
6470 ; AVX-NEXT: vorps %ymm3, %ymm4, %ymm2
6471 ; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6472 ; AVX-NEXT: vmovq {{.*#+}} xmm15 = [5,11,128,128,128,0,0,0,0,0,0,0,0,0,0,0]
6473 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6474 ; AVX-NEXT: vpshufb %xmm15, %xmm2, %xmm3
6475 ; AVX-NEXT: vmovq {{.*#+}} xmm4 = [128,128,1,7,13,0,0,0,0,0,0,0,0,0,0,0]
6476 ; AVX-NEXT: vpshufb %xmm4, %xmm13, %xmm5
6477 ; AVX-NEXT: vpor %xmm3, %xmm5, %xmm3
6478 ; AVX-NEXT: vmovddup {{.*#+}} xmm5 = [0,0,128,128,128,3,9,15,0,0,128,128,128,3,9,15]
6479 ; AVX-NEXT: # xmm5 = mem[0,0]
6480 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6481 ; AVX-NEXT: vpshufb %xmm5, %xmm2, %xmm7
6482 ; AVX-NEXT: vmovddup {{.*#+}} xmm6 = [0,0,1,7,13,128,128,128,0,0,1,7,13,128,128,128]
6483 ; AVX-NEXT: # xmm6 = mem[0,0]
6484 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6485 ; AVX-NEXT: vpshufb %xmm6, %xmm2, %xmm8
6486 ; AVX-NEXT: vpor %xmm7, %xmm8, %xmm7
6487 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm7, %ymm3
6488 ; AVX-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm7 # 32-byte Folded Reload
6489 ; AVX-NEXT: vandps %ymm3, %ymm10, %ymm3
6490 ; AVX-NEXT: vorps %ymm7, %ymm3, %ymm3
6491 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6492 ; AVX-NEXT: vpshufb %xmm5, %xmm2, %xmm7
6493 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6494 ; AVX-NEXT: vpshufb %xmm6, %xmm0, %xmm8
6495 ; AVX-NEXT: vpor %xmm7, %xmm8, %xmm7
6496 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm13 = [5,11,0,0,5,11,0,0,5,11,0,0,5,11,0,0]
6497 ; AVX-NEXT: vpshufb %xmm13, %xmm14, %xmm8
6498 ; AVX-NEXT: vbroadcastss {{.*#+}} xmm14 = [0,3,9,15,0,3,9,15,0,3,9,15,0,3,9,15]
6499 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6500 ; AVX-NEXT: vpshufb %xmm14, %xmm0, %xmm9
6501 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm9[1],xmm8[1]
6502 ; AVX-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3,4],xmm7[5,6,7]
6503 ; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
6504 ; AVX-NEXT: vandps %ymm2, %ymm3, %ymm3
6505 ; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
6506 ; AVX-NEXT: vandnps %ymm7, %ymm2, %ymm7
6507 ; AVX-NEXT: vorps %ymm7, %ymm3, %ymm3
6508 ; AVX-NEXT: vmovdqa (%rsp), %xmm7 # 16-byte Reload
6509 ; AVX-NEXT: vpshufb %xmm15, %xmm7, %xmm7
6510 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
6511 ; AVX-NEXT: vpshufb %xmm4, %xmm8, %xmm8
6512 ; AVX-NEXT: vpor %xmm7, %xmm8, %xmm7
6513 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6514 ; AVX-NEXT: vpshufb %xmm5, %xmm4, %xmm8
6515 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6516 ; AVX-NEXT: vpshufb %xmm6, %xmm4, %xmm9
6517 ; AVX-NEXT: vpor %xmm8, %xmm9, %xmm8
6518 ; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm7
6519 ; AVX-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm8 # 32-byte Folded Reload
6520 ; AVX-NEXT: vandps %ymm7, %ymm10, %ymm0
6521 ; AVX-NEXT: vorps %ymm0, %ymm8, %ymm0
6522 ; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6523 ; AVX-NEXT: vpshufb %xmm5, %xmm4, %xmm5
6524 ; AVX-NEXT: vpshufb %xmm6, %xmm11, %xmm6
6525 ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5
6526 ; AVX-NEXT: vpshufb %xmm13, %xmm12, %xmm6
6527 ; AVX-NEXT: vpshufb %xmm14, %xmm1, %xmm7
6528 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm7[1],xmm6[1]
6529 ; AVX-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3,4],xmm5[5,6,7]
6530 ; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
6531 ; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
6532 ; AVX-NEXT: vandnps %ymm5, %ymm2, %ymm1
6533 ; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
6534 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6535 ; AVX-NEXT: vmovaps %ymm1, 32(%rsi)
6536 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6537 ; AVX-NEXT: vmovaps %ymm1, (%rsi)
6538 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6539 ; AVX-NEXT: vmovaps %ymm1, 32(%rdx)
6540 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6541 ; AVX-NEXT: vmovaps %ymm1, (%rdx)
6542 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6543 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
6544 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6545 ; AVX-NEXT: vmovaps %ymm1, (%rcx)
6546 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6547 ; AVX-NEXT: vmovaps %ymm1, 32(%r8)
6548 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6549 ; AVX-NEXT: vmovaps %ymm1, (%r8)
6550 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6551 ; AVX-NEXT: vmovaps %ymm1, 32(%r9)
6552 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
6553 ; AVX-NEXT: vmovaps %ymm1, (%r9)
6554 ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
6555 ; AVX-NEXT: vmovaps %ymm0, 32(%rax)
6556 ; AVX-NEXT: vmovaps %ymm3, (%rax)
6557 ; AVX-NEXT: addq $616, %rsp # imm = 0x268
6558 ; AVX-NEXT: vzeroupper
6561 ; AVX2-LABEL: load_i8_stride6_vf64:
6563 ; AVX2-NEXT: subq $328, %rsp # imm = 0x148
6564 ; AVX2-NEXT: vmovdqa 192(%rdi), %ymm7
6565 ; AVX2-NEXT: vmovdqa (%rdi), %ymm3
6566 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm5
6567 ; AVX2-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6568 ; AVX2-NEXT: vmovdqa 64(%rdi), %ymm0
6569 ; AVX2-NEXT: vmovdqa 96(%rdi), %ymm1
6570 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm13 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
6571 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[0,1],ymm1[0,1]
6572 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6573 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
6574 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6575 ; AVX2-NEXT: vpblendvb %ymm13, %ymm2, %ymm0, %ymm4
6576 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
6577 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm1 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
6578 ; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm5, %ymm2
6579 ; AVX2-NEXT: vmovdqa %ymm3, %ymm5
6580 ; AVX2-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill
6581 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm10 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
6582 ; AVX2-NEXT: vpshufb %xmm10, %xmm2, %xmm9
6583 ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
6584 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm11 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
6585 ; AVX2-NEXT: vpshufb %xmm11, %xmm3, %xmm12
6586 ; AVX2-NEXT: vpor %xmm9, %xmm12, %xmm9
6587 ; AVX2-NEXT: vpmovsxdq {{.*#+}} xmm12 = [18446744073709551615,16777215]
6588 ; AVX2-NEXT: vpblendvb %ymm12, %ymm9, %ymm0, %ymm0
6589 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6590 ; AVX2-NEXT: vmovdqa 224(%rdi), %ymm8
6591 ; AVX2-NEXT: vpblendvb %ymm1, %ymm7, %ymm8, %ymm14
6592 ; AVX2-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6593 ; AVX2-NEXT: vpshufb %xmm10, %xmm14, %xmm0
6594 ; AVX2-NEXT: vextracti128 $1, %ymm14, %xmm15
6595 ; AVX2-NEXT: vpshufb %xmm11, %xmm15, %xmm10
6596 ; AVX2-NEXT: vpor %xmm0, %xmm10, %xmm1
6597 ; AVX2-NEXT: vmovdqa 288(%rdi), %ymm11
6598 ; AVX2-NEXT: vmovdqa 256(%rdi), %ymm0
6599 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm0[0,1],ymm11[0,1]
6600 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm11 = ymm0[2,3],ymm11[2,3]
6601 ; AVX2-NEXT: vpblendvb %ymm13, %ymm9, %ymm11, %ymm13
6602 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
6603 ; AVX2-NEXT: vpblendvb %ymm12, %ymm1, %ymm0, %ymm0
6604 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6605 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
6606 ; AVX2-NEXT: vpshufb %xmm0, %xmm2, %xmm1
6607 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
6608 ; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm3
6609 ; AVX2-NEXT: vpor %xmm1, %xmm3, %xmm1
6610 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11]
6611 ; AVX2-NEXT: vpshufb %ymm3, %ymm4, %ymm4
6612 ; AVX2-NEXT: vpblendvb %ymm12, %ymm1, %ymm4, %ymm1
6613 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6614 ; AVX2-NEXT: vpshufb %xmm0, %xmm14, %xmm0
6615 ; AVX2-NEXT: vpshufb %xmm2, %xmm15, %xmm1
6616 ; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
6617 ; AVX2-NEXT: vpshufb %ymm3, %ymm13, %ymm1
6618 ; AVX2-NEXT: vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
6619 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6620 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm13 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
6621 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6622 ; AVX2-NEXT: vpblendvb %ymm13, %ymm10, %ymm5, %ymm1
6623 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
6624 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
6625 ; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm3
6626 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
6627 ; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm5
6628 ; AVX2-NEXT: vpor %xmm3, %xmm5, %xmm5
6629 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm14 = [2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12]
6630 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm0 = [65535,0,0,65535,0,0,65535,0,0,0,65535,0,0,65535,0,0]
6631 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
6632 ; AVX2-NEXT: vpblendvb %ymm0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
6633 ; AVX2-NEXT: vpshufb %ymm14, %ymm3, %ymm15
6634 ; AVX2-NEXT: vpblendvb %ymm12, %ymm5, %ymm15, %ymm5
6635 ; AVX2-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6636 ; AVX2-NEXT: vpblendvb %ymm13, %ymm8, %ymm7, %ymm5
6637 ; AVX2-NEXT: vmovdqa %ymm8, %ymm7
6638 ; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm15
6639 ; AVX2-NEXT: vpshufb %xmm6, %xmm15, %xmm6
6640 ; AVX2-NEXT: vpshufb %xmm4, %xmm5, %xmm4
6641 ; AVX2-NEXT: vpor %xmm6, %xmm4, %xmm4
6642 ; AVX2-NEXT: vpblendvb %ymm0, %ymm11, %ymm9, %ymm0
6643 ; AVX2-NEXT: vpshufb %ymm14, %ymm0, %ymm6
6644 ; AVX2-NEXT: vpblendvb %ymm12, %ymm4, %ymm6, %ymm4
6645 ; AVX2-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6646 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
6647 ; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
6648 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
6649 ; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm1
6650 ; AVX2-NEXT: vpor %xmm2, %xmm1, %xmm1
6651 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13]
6652 ; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm3
6653 ; AVX2-NEXT: vpblendvb %ymm12, %ymm1, %ymm3, %ymm1
6654 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6655 ; AVX2-NEXT: vpshufb %xmm4, %xmm15, %xmm1
6656 ; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm3
6657 ; AVX2-NEXT: vpor %xmm1, %xmm3, %xmm1
6658 ; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
6659 ; AVX2-NEXT: vpblendvb %ymm12, %ymm1, %ymm0, %ymm14
6660 ; AVX2-NEXT: vmovdqa 160(%rdi), %ymm0
6661 ; AVX2-NEXT: vmovdqa 128(%rdi), %ymm3
6662 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0]
6663 ; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm3, %ymm1
6664 ; AVX2-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6665 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm1 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
6666 ; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm5
6667 ; AVX2-NEXT: vpblendvb %ymm13, %ymm0, %ymm3, %ymm15
6668 ; AVX2-NEXT: vmovdqa 352(%rdi), %ymm4
6669 ; AVX2-NEXT: vmovdqa 320(%rdi), %ymm6
6670 ; AVX2-NEXT: vpblendvb %ymm1, %ymm6, %ymm4, %ymm1
6671 ; AVX2-NEXT: vpblendvb %ymm13, %ymm4, %ymm6, %ymm12
6672 ; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm6, %ymm0
6673 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6674 ; AVX2-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
6675 ; AVX2-NEXT: vpblendvb %ymm2, %ymm10, %ymm0, %ymm8
6676 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6677 ; AVX2-NEXT: vpblendvb %ymm2, %ymm7, %ymm0, %ymm10
6678 ; AVX2-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
6679 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6680 ; AVX2-NEXT: vpblendvb %ymm2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
6681 ; AVX2-NEXT: vpblendvb %ymm2, %ymm11, %ymm9, %ymm0
6682 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6683 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
6684 ; AVX2-NEXT: vpshufb %xmm2, %xmm5, %xmm6
6685 ; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm11
6686 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
6687 ; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm9
6688 ; AVX2-NEXT: vpor %xmm6, %xmm9, %xmm6
6689 ; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
6690 ; AVX2-NEXT: vpmovsxwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9
6691 ; AVX2-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm3 # 32-byte Folded Reload
6692 ; AVX2-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6693 ; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm2
6694 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm6
6695 ; AVX2-NEXT: vpshufb %xmm0, %xmm6, %xmm0
6696 ; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
6697 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6698 ; AVX2-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6699 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6700 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128]
6701 ; AVX2-NEXT: vpshufb %xmm0, %xmm5, %xmm2
6702 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = [u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13]
6703 ; AVX2-NEXT: vpshufb %xmm5, %xmm11, %xmm11
6704 ; AVX2-NEXT: vpor %xmm2, %xmm11, %xmm2
6705 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
6706 ; AVX2-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
6707 ; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6708 ; AVX2-NEXT: vpshufb %xmm0, %xmm1, %xmm0
6709 ; AVX2-NEXT: vpshufb %xmm5, %xmm6, %xmm1
6710 ; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
6711 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6712 ; AVX2-NEXT: vpblendvb %ymm9, %ymm14, %ymm0, %ymm0
6713 ; AVX2-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
6714 ; AVX2-NEXT: vextracti128 $1, %ymm15, %xmm14
6715 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
6716 ; AVX2-NEXT: vpshufb %xmm7, %xmm14, %xmm0
6717 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
6718 ; AVX2-NEXT: vpshufb %xmm2, %xmm15, %xmm1
6719 ; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm1
6720 ; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm3
6721 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u]
6722 ; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm6
6723 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
6724 ; AVX2-NEXT: vpshufb %xmm4, %xmm8, %xmm11
6725 ; AVX2-NEXT: vpor %xmm6, %xmm11, %xmm6
6726 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm11 = [4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14]
6727 ; AVX2-NEXT: vpshufb %ymm11, %ymm13, %ymm0
6728 ; AVX2-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm0[5,6,7]
6729 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
6730 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6731 ; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
6732 ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6733 ; AVX2-NEXT: vextracti128 $1, %ymm12, %xmm1
6734 ; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm0
6735 ; AVX2-NEXT: vpshufb %xmm2, %xmm12, %xmm2
6736 ; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
6737 ; AVX2-NEXT: vextracti128 $1, %ymm10, %xmm2
6738 ; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm5
6739 ; AVX2-NEXT: vpshufb %xmm4, %xmm10, %xmm4
6740 ; AVX2-NEXT: vpor %xmm5, %xmm4, %xmm4
6741 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
6742 ; AVX2-NEXT: vpshufb %ymm11, %ymm6, %ymm5
6743 ; AVX2-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
6744 ; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
6745 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6746 ; AVX2-NEXT: vpblendvb %ymm9, %ymm4, %ymm0, %ymm5
6747 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
6748 ; AVX2-NEXT: vpshufb %xmm0, %xmm14, %xmm4
6749 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
6750 ; AVX2-NEXT: vpshufb %xmm7, %xmm15, %xmm11
6751 ; AVX2-NEXT: vpor %xmm4, %xmm11, %xmm4
6752 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm11 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
6753 ; AVX2-NEXT: vpshufb %xmm11, %xmm3, %xmm3
6754 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm14 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
6755 ; AVX2-NEXT: vpshufb %xmm14, %xmm8, %xmm8
6756 ; AVX2-NEXT: vpor %xmm3, %xmm8, %xmm3
6757 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm8 = [5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15]
6758 ; AVX2-NEXT: vpshufb %ymm8, %ymm13, %ymm13
6759 ; AVX2-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm13[5,6,7]
6760 ; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm13[4,5,6,7]
6761 ; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
6762 ; AVX2-NEXT: vpblendvb %ymm9, %ymm3, %ymm4, %ymm4
6763 ; AVX2-NEXT: vpshufb %xmm0, %xmm1, %xmm0
6764 ; AVX2-NEXT: vpshufb %xmm7, %xmm12, %xmm1
6765 ; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
6766 ; AVX2-NEXT: vpshufb %xmm11, %xmm2, %xmm1
6767 ; AVX2-NEXT: vpshufb %xmm14, %xmm10, %xmm2
6768 ; AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
6769 ; AVX2-NEXT: vpshufb %ymm8, %ymm6, %ymm2
6770 ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
6771 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
6772 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6773 ; AVX2-NEXT: vpblendvb %ymm9, %ymm1, %ymm0, %ymm3
6774 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
6775 ; AVX2-NEXT: vextracti128 $1, %ymm9, %xmm0
6776 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
6777 ; AVX2-NEXT: vpshufb %xmm1, %xmm0, %xmm2
6778 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
6779 ; AVX2-NEXT: vpshufb %xmm7, %xmm9, %xmm8
6780 ; AVX2-NEXT: vmovdqa %ymm9, %ymm10
6781 ; AVX2-NEXT: vpor %xmm2, %xmm8, %xmm2
6782 ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
6783 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
6784 ; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm8[0,1,2],ymm2[3,4,5,6,7],ymm8[8,9,10],ymm2[11,12,13,14,15]
6785 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm2[4,5,6,7]
6786 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
6787 ; AVX2-NEXT: vextracti128 $1, %ymm9, %xmm8
6788 ; AVX2-NEXT: vpshufb %xmm1, %xmm8, %xmm1
6789 ; AVX2-NEXT: vpshufb %xmm7, %xmm9, %xmm7
6790 ; AVX2-NEXT: vmovdqa %ymm9, %ymm11
6791 ; AVX2-NEXT: vpor %xmm1, %xmm7, %xmm1
6792 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
6793 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
6794 ; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm7[0,1,2],ymm1[3,4,5,6,7],ymm7[8,9,10],ymm1[11,12,13,14,15]
6795 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
6796 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
6797 ; AVX2-NEXT: vpshufb %xmm7, %xmm0, %xmm0
6798 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
6799 ; AVX2-NEXT: vpshufb %xmm9, %xmm10, %xmm10
6800 ; AVX2-NEXT: vpor %xmm0, %xmm10, %xmm0
6801 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6802 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6803 ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
6804 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
6805 ; AVX2-NEXT: vpshufb %xmm7, %xmm8, %xmm7
6806 ; AVX2-NEXT: vpshufb %xmm9, %xmm11, %xmm8
6807 ; AVX2-NEXT: vpor %xmm7, %xmm8, %xmm7
6808 ; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
6809 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
6810 ; AVX2-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
6811 ; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
6812 ; AVX2-NEXT: vmovdqa %ymm1, 32(%rsi)
6813 ; AVX2-NEXT: vmovdqa %ymm2, (%rsi)
6814 ; AVX2-NEXT: vmovdqa %ymm7, 32(%rdx)
6815 ; AVX2-NEXT: vmovdqa %ymm0, (%rdx)
6816 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6817 ; AVX2-NEXT: vmovaps %ymm0, 32(%rcx)
6818 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6819 ; AVX2-NEXT: vmovaps %ymm0, (%rcx)
6820 ; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
6821 ; AVX2-NEXT: vmovaps %ymm0, 32(%r8)
6822 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6823 ; AVX2-NEXT: vmovaps %ymm0, (%r8)
6824 ; AVX2-NEXT: vmovdqa %ymm5, 32(%r9)
6825 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6826 ; AVX2-NEXT: vmovaps %ymm0, (%r9)
6827 ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
6828 ; AVX2-NEXT: vmovdqa %ymm3, 32(%rax)
6829 ; AVX2-NEXT: vmovdqa %ymm4, (%rax)
6830 ; AVX2-NEXT: addq $328, %rsp # imm = 0x148
6831 ; AVX2-NEXT: vzeroupper
6834 ; AVX2-FP-LABEL: load_i8_stride6_vf64:
6836 ; AVX2-FP-NEXT: subq $328, %rsp # imm = 0x148
6837 ; AVX2-FP-NEXT: vmovdqa 192(%rdi), %ymm7
6838 ; AVX2-FP-NEXT: vmovdqa (%rdi), %ymm3
6839 ; AVX2-FP-NEXT: vmovdqa 32(%rdi), %ymm5
6840 ; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6841 ; AVX2-FP-NEXT: vmovdqa 64(%rdi), %ymm0
6842 ; AVX2-FP-NEXT: vmovdqa 96(%rdi), %ymm1
6843 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm13 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
6844 ; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[0,1],ymm1[0,1]
6845 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6846 ; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
6847 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6848 ; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm2, %ymm0, %ymm4
6849 ; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
6850 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
6851 ; AVX2-FP-NEXT: vpblendvb %ymm1, %ymm3, %ymm5, %ymm2
6852 ; AVX2-FP-NEXT: vmovdqa %ymm3, %ymm5
6853 ; AVX2-FP-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill
6854 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
6855 ; AVX2-FP-NEXT: vpshufb %xmm10, %xmm2, %xmm9
6856 ; AVX2-FP-NEXT: vextracti128 $1, %ymm2, %xmm3
6857 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm11 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
6858 ; AVX2-FP-NEXT: vpshufb %xmm11, %xmm3, %xmm12
6859 ; AVX2-FP-NEXT: vpor %xmm9, %xmm12, %xmm9
6860 ; AVX2-FP-NEXT: vpmovsxdq {{.*#+}} xmm12 = [18446744073709551615,16777215]
6861 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm9, %ymm0, %ymm0
6862 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6863 ; AVX2-FP-NEXT: vmovdqa 224(%rdi), %ymm8
6864 ; AVX2-FP-NEXT: vpblendvb %ymm1, %ymm7, %ymm8, %ymm14
6865 ; AVX2-FP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6866 ; AVX2-FP-NEXT: vpshufb %xmm10, %xmm14, %xmm0
6867 ; AVX2-FP-NEXT: vextracti128 $1, %ymm14, %xmm15
6868 ; AVX2-FP-NEXT: vpshufb %xmm11, %xmm15, %xmm10
6869 ; AVX2-FP-NEXT: vpor %xmm0, %xmm10, %xmm1
6870 ; AVX2-FP-NEXT: vmovdqa 288(%rdi), %ymm11
6871 ; AVX2-FP-NEXT: vmovdqa 256(%rdi), %ymm0
6872 ; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm0[0,1],ymm11[0,1]
6873 ; AVX2-FP-NEXT: vperm2i128 {{.*#+}} ymm11 = ymm0[2,3],ymm11[2,3]
6874 ; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm9, %ymm11, %ymm13
6875 ; AVX2-FP-NEXT: vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
6876 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm1, %ymm0, %ymm0
6877 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6878 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm0 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
6879 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm2, %xmm1
6880 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
6881 ; AVX2-FP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
6882 ; AVX2-FP-NEXT: vpor %xmm1, %xmm3, %xmm1
6883 ; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11]
6884 ; AVX2-FP-NEXT: vpshufb %ymm3, %ymm4, %ymm4
6885 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm1, %ymm4, %ymm1
6886 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6887 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm14, %xmm0
6888 ; AVX2-FP-NEXT: vpshufb %xmm2, %xmm15, %xmm1
6889 ; AVX2-FP-NEXT: vpor %xmm0, %xmm1, %xmm0
6890 ; AVX2-FP-NEXT: vpshufb %ymm3, %ymm13, %ymm1
6891 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
6892 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6893 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm13 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
6894 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
6895 ; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm10, %ymm5, %ymm1
6896 ; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm2
6897 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
6898 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm2, %xmm3
6899 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm4 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
6900 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm1, %xmm5
6901 ; AVX2-FP-NEXT: vpor %xmm3, %xmm5, %xmm5
6902 ; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm14 = [2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12]
6903 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm0 = [65535,0,0,65535,0,0,65535,0,0,0,65535,0,0,65535,0,0]
6904 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
6905 ; AVX2-FP-NEXT: vpblendvb %ymm0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
6906 ; AVX2-FP-NEXT: vpshufb %ymm14, %ymm3, %ymm15
6907 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm5, %ymm15, %ymm5
6908 ; AVX2-FP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6909 ; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm8, %ymm7, %ymm5
6910 ; AVX2-FP-NEXT: vmovdqa %ymm8, %ymm7
6911 ; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm15
6912 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm15, %xmm6
6913 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm5, %xmm4
6914 ; AVX2-FP-NEXT: vpor %xmm6, %xmm4, %xmm4
6915 ; AVX2-FP-NEXT: vpblendvb %ymm0, %ymm11, %ymm9, %ymm0
6916 ; AVX2-FP-NEXT: vpshufb %ymm14, %ymm0, %ymm6
6917 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm4, %ymm6, %ymm4
6918 ; AVX2-FP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6919 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
6920 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
6921 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm6 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
6922 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
6923 ; AVX2-FP-NEXT: vpor %xmm2, %xmm1, %xmm1
6924 ; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13]
6925 ; AVX2-FP-NEXT: vpshufb %ymm2, %ymm3, %ymm3
6926 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm1, %ymm3, %ymm1
6927 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6928 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm15, %xmm1
6929 ; AVX2-FP-NEXT: vpshufb %xmm6, %xmm5, %xmm3
6930 ; AVX2-FP-NEXT: vpor %xmm1, %xmm3, %xmm1
6931 ; AVX2-FP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
6932 ; AVX2-FP-NEXT: vpblendvb %ymm12, %ymm1, %ymm0, %ymm14
6933 ; AVX2-FP-NEXT: vmovdqa 160(%rdi), %ymm0
6934 ; AVX2-FP-NEXT: vmovdqa 128(%rdi), %ymm3
6935 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0]
6936 ; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm0, %ymm3, %ymm1
6937 ; AVX2-FP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6938 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
6939 ; AVX2-FP-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm5
6940 ; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm0, %ymm3, %ymm15
6941 ; AVX2-FP-NEXT: vmovdqa 352(%rdi), %ymm4
6942 ; AVX2-FP-NEXT: vmovdqa 320(%rdi), %ymm6
6943 ; AVX2-FP-NEXT: vpblendvb %ymm1, %ymm6, %ymm4, %ymm1
6944 ; AVX2-FP-NEXT: vpblendvb %ymm13, %ymm4, %ymm6, %ymm12
6945 ; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm4, %ymm6, %ymm0
6946 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6947 ; AVX2-FP-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
6948 ; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm10, %ymm0, %ymm8
6949 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6950 ; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm7, %ymm0, %ymm10
6951 ; AVX2-FP-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
6952 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
6953 ; AVX2-FP-NEXT: vpblendvb %ymm2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
6954 ; AVX2-FP-NEXT: vpblendvb %ymm2, %ymm11, %ymm9, %ymm0
6955 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6956 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
6957 ; AVX2-FP-NEXT: vpshufb %xmm2, %xmm5, %xmm6
6958 ; AVX2-FP-NEXT: vextracti128 $1, %ymm5, %xmm11
6959 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
6960 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm11, %xmm9
6961 ; AVX2-FP-NEXT: vpor %xmm6, %xmm9, %xmm6
6962 ; AVX2-FP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
6963 ; AVX2-FP-NEXT: vpmovsxwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9
6964 ; AVX2-FP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm3 # 32-byte Folded Reload
6965 ; AVX2-FP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6966 ; AVX2-FP-NEXT: vpshufb %xmm2, %xmm1, %xmm2
6967 ; AVX2-FP-NEXT: vextracti128 $1, %ymm1, %xmm6
6968 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm6, %xmm0
6969 ; AVX2-FP-NEXT: vpor %xmm2, %xmm0, %xmm0
6970 ; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6971 ; AVX2-FP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
6972 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6973 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128]
6974 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm5, %xmm2
6975 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm5 = [u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13]
6976 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm11, %xmm11
6977 ; AVX2-FP-NEXT: vpor %xmm2, %xmm11, %xmm2
6978 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
6979 ; AVX2-FP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
6980 ; AVX2-FP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6981 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
6982 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm6, %xmm1
6983 ; AVX2-FP-NEXT: vpor %xmm0, %xmm1, %xmm0
6984 ; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
6985 ; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm14, %ymm0, %ymm0
6986 ; AVX2-FP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
6987 ; AVX2-FP-NEXT: vextracti128 $1, %ymm15, %xmm14
6988 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
6989 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm14, %xmm0
6990 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
6991 ; AVX2-FP-NEXT: vpshufb %xmm2, %xmm15, %xmm1
6992 ; AVX2-FP-NEXT: vpor %xmm0, %xmm1, %xmm1
6993 ; AVX2-FP-NEXT: vextracti128 $1, %ymm8, %xmm3
6994 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u]
6995 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm3, %xmm6
6996 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm4 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
6997 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm8, %xmm11
6998 ; AVX2-FP-NEXT: vpor %xmm6, %xmm11, %xmm6
6999 ; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14]
7000 ; AVX2-FP-NEXT: vpshufb %ymm11, %ymm13, %ymm0
7001 ; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm0[5,6,7]
7002 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
7003 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
7004 ; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
7005 ; AVX2-FP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7006 ; AVX2-FP-NEXT: vextracti128 $1, %ymm12, %xmm1
7007 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm1, %xmm0
7008 ; AVX2-FP-NEXT: vpshufb %xmm2, %xmm12, %xmm2
7009 ; AVX2-FP-NEXT: vpor %xmm0, %xmm2, %xmm0
7010 ; AVX2-FP-NEXT: vextracti128 $1, %ymm10, %xmm2
7011 ; AVX2-FP-NEXT: vpshufb %xmm5, %xmm2, %xmm5
7012 ; AVX2-FP-NEXT: vpshufb %xmm4, %xmm10, %xmm4
7013 ; AVX2-FP-NEXT: vpor %xmm5, %xmm4, %xmm4
7014 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
7015 ; AVX2-FP-NEXT: vpshufb %ymm11, %ymm6, %ymm5
7016 ; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
7017 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
7018 ; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7019 ; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm4, %ymm0, %ymm5
7020 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
7021 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm14, %xmm4
7022 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
7023 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm15, %xmm11
7024 ; AVX2-FP-NEXT: vpor %xmm4, %xmm11, %xmm4
7025 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm11 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
7026 ; AVX2-FP-NEXT: vpshufb %xmm11, %xmm3, %xmm3
7027 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm14 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
7028 ; AVX2-FP-NEXT: vpshufb %xmm14, %xmm8, %xmm8
7029 ; AVX2-FP-NEXT: vpor %xmm3, %xmm8, %xmm3
7030 ; AVX2-FP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15]
7031 ; AVX2-FP-NEXT: vpshufb %ymm8, %ymm13, %ymm13
7032 ; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm13[5,6,7]
7033 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm13[4,5,6,7]
7034 ; AVX2-FP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
7035 ; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm3, %ymm4, %ymm4
7036 ; AVX2-FP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
7037 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm12, %xmm1
7038 ; AVX2-FP-NEXT: vpor %xmm0, %xmm1, %xmm0
7039 ; AVX2-FP-NEXT: vpshufb %xmm11, %xmm2, %xmm1
7040 ; AVX2-FP-NEXT: vpshufb %xmm14, %xmm10, %xmm2
7041 ; AVX2-FP-NEXT: vpor %xmm1, %xmm2, %xmm1
7042 ; AVX2-FP-NEXT: vpshufb %ymm8, %ymm6, %ymm2
7043 ; AVX2-FP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
7044 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7045 ; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7046 ; AVX2-FP-NEXT: vpblendvb %ymm9, %ymm1, %ymm0, %ymm3
7047 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
7048 ; AVX2-FP-NEXT: vextracti128 $1, %ymm9, %xmm0
7049 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm1 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
7050 ; AVX2-FP-NEXT: vpshufb %xmm1, %xmm0, %xmm2
7051 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
7052 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm9, %xmm8
7053 ; AVX2-FP-NEXT: vmovdqa %ymm9, %ymm10
7054 ; AVX2-FP-NEXT: vpor %xmm2, %xmm8, %xmm2
7055 ; AVX2-FP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
7056 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
7057 ; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm2 = ymm8[0,1,2],ymm2[3,4,5,6,7],ymm8[8,9,10],ymm2[11,12,13,14,15]
7058 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm2[4,5,6,7]
7059 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
7060 ; AVX2-FP-NEXT: vextracti128 $1, %ymm9, %xmm8
7061 ; AVX2-FP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
7062 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
7063 ; AVX2-FP-NEXT: vmovdqa %ymm9, %ymm11
7064 ; AVX2-FP-NEXT: vpor %xmm1, %xmm7, %xmm1
7065 ; AVX2-FP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
7066 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
7067 ; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm1 = ymm7[0,1,2],ymm1[3,4,5,6,7],ymm7[8,9,10],ymm1[11,12,13,14,15]
7068 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
7069 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
7070 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
7071 ; AVX2-FP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
7072 ; AVX2-FP-NEXT: vpshufb %xmm9, %xmm10, %xmm10
7073 ; AVX2-FP-NEXT: vpor %xmm0, %xmm10, %xmm0
7074 ; AVX2-FP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7075 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
7076 ; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
7077 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
7078 ; AVX2-FP-NEXT: vpshufb %xmm7, %xmm8, %xmm7
7079 ; AVX2-FP-NEXT: vpshufb %xmm9, %xmm11, %xmm8
7080 ; AVX2-FP-NEXT: vpor %xmm7, %xmm8, %xmm7
7081 ; AVX2-FP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
7082 ; AVX2-FP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
7083 ; AVX2-FP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
7084 ; AVX2-FP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
7085 ; AVX2-FP-NEXT: vmovdqa %ymm1, 32(%rsi)
7086 ; AVX2-FP-NEXT: vmovdqa %ymm2, (%rsi)
7087 ; AVX2-FP-NEXT: vmovdqa %ymm7, 32(%rdx)
7088 ; AVX2-FP-NEXT: vmovdqa %ymm0, (%rdx)
7089 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7090 ; AVX2-FP-NEXT: vmovaps %ymm0, 32(%rcx)
7091 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7092 ; AVX2-FP-NEXT: vmovaps %ymm0, (%rcx)
7093 ; AVX2-FP-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
7094 ; AVX2-FP-NEXT: vmovaps %ymm0, 32(%r8)
7095 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7096 ; AVX2-FP-NEXT: vmovaps %ymm0, (%r8)
7097 ; AVX2-FP-NEXT: vmovdqa %ymm5, 32(%r9)
7098 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7099 ; AVX2-FP-NEXT: vmovaps %ymm0, (%r9)
7100 ; AVX2-FP-NEXT: movq {{[0-9]+}}(%rsp), %rax
7101 ; AVX2-FP-NEXT: vmovdqa %ymm3, 32(%rax)
7102 ; AVX2-FP-NEXT: vmovdqa %ymm4, (%rax)
7103 ; AVX2-FP-NEXT: addq $328, %rsp # imm = 0x148
7104 ; AVX2-FP-NEXT: vzeroupper
7105 ; AVX2-FP-NEXT: retq
7107 ; AVX2-FCP-LABEL: load_i8_stride6_vf64:
7108 ; AVX2-FCP: # %bb.0:
7109 ; AVX2-FCP-NEXT: subq $328, %rsp # imm = 0x148
7110 ; AVX2-FCP-NEXT: vmovdqa 192(%rdi), %ymm7
7111 ; AVX2-FCP-NEXT: vmovdqa (%rdi), %ymm3
7112 ; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
7113 ; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7114 ; AVX2-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
7115 ; AVX2-FCP-NEXT: vmovdqa 96(%rdi), %ymm1
7116 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm13 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
7117 ; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[0,1],ymm1[0,1]
7118 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7119 ; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
7120 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7121 ; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm2, %ymm0, %ymm4
7122 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
7123 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
7124 ; AVX2-FCP-NEXT: vpblendvb %ymm1, %ymm3, %ymm5, %ymm2
7125 ; AVX2-FCP-NEXT: vmovdqa %ymm3, %ymm5
7126 ; AVX2-FCP-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill
7127 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
7128 ; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm2, %xmm9
7129 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
7130 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
7131 ; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm3, %xmm12
7132 ; AVX2-FCP-NEXT: vpor %xmm9, %xmm12, %xmm9
7133 ; AVX2-FCP-NEXT: vpmovsxdq {{.*#+}} xmm12 = [18446744073709551615,16777215]
7134 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm9, %ymm0, %ymm0
7135 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7136 ; AVX2-FCP-NEXT: vmovdqa 224(%rdi), %ymm8
7137 ; AVX2-FCP-NEXT: vpblendvb %ymm1, %ymm7, %ymm8, %ymm14
7138 ; AVX2-FCP-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7139 ; AVX2-FCP-NEXT: vpshufb %xmm10, %xmm14, %xmm0
7140 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm14, %xmm15
7141 ; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm15, %xmm10
7142 ; AVX2-FCP-NEXT: vpor %xmm0, %xmm10, %xmm1
7143 ; AVX2-FCP-NEXT: vmovdqa 288(%rdi), %ymm11
7144 ; AVX2-FCP-NEXT: vmovdqa 256(%rdi), %ymm0
7145 ; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm0[0,1],ymm11[0,1]
7146 ; AVX2-FCP-NEXT: vperm2i128 {{.*#+}} ymm11 = ymm0[2,3],ymm11[2,3]
7147 ; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm9, %ymm11, %ymm13
7148 ; AVX2-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
7149 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm1, %ymm0, %ymm0
7150 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7151 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
7152 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm2, %xmm1
7153 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
7154 ; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm3, %xmm3
7155 ; AVX2-FCP-NEXT: vpor %xmm1, %xmm3, %xmm1
7156 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11]
7157 ; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm4, %ymm4
7158 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm1, %ymm4, %ymm1
7159 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7160 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm14, %xmm0
7161 ; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm15, %xmm1
7162 ; AVX2-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
7163 ; AVX2-FCP-NEXT: vpshufb %ymm3, %ymm13, %ymm1
7164 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
7165 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7166 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm13 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
7167 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
7168 ; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm10, %ymm5, %ymm1
7169 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
7170 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
7171 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm3
7172 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
7173 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm1, %xmm5
7174 ; AVX2-FCP-NEXT: vpor %xmm3, %xmm5, %xmm5
7175 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm14 = [2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12]
7176 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm0 = [65535,0,0,65535,0,0,65535,0,0,0,65535,0,0,65535,0,0]
7177 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
7178 ; AVX2-FCP-NEXT: vpblendvb %ymm0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
7179 ; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm3, %ymm15
7180 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm5, %ymm15, %ymm5
7181 ; AVX2-FCP-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7182 ; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm8, %ymm7, %ymm5
7183 ; AVX2-FCP-NEXT: vmovdqa %ymm8, %ymm7
7184 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm15
7185 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm15, %xmm6
7186 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm5, %xmm4
7187 ; AVX2-FCP-NEXT: vpor %xmm6, %xmm4, %xmm4
7188 ; AVX2-FCP-NEXT: vpblendvb %ymm0, %ymm11, %ymm9, %ymm0
7189 ; AVX2-FCP-NEXT: vpshufb %ymm14, %ymm0, %ymm6
7190 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm4, %ymm6, %ymm4
7191 ; AVX2-FCP-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7192 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
7193 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm2, %xmm2
7194 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
7195 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
7196 ; AVX2-FCP-NEXT: vpor %xmm2, %xmm1, %xmm1
7197 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13]
7198 ; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm3
7199 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm1, %ymm3, %ymm1
7200 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7201 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm15, %xmm1
7202 ; AVX2-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm3
7203 ; AVX2-FCP-NEXT: vpor %xmm1, %xmm3, %xmm1
7204 ; AVX2-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
7205 ; AVX2-FCP-NEXT: vpblendvb %ymm12, %ymm1, %ymm0, %ymm14
7206 ; AVX2-FCP-NEXT: vmovdqa 160(%rdi), %ymm0
7207 ; AVX2-FCP-NEXT: vmovdqa 128(%rdi), %ymm3
7208 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0]
7209 ; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm0, %ymm3, %ymm1
7210 ; AVX2-FCP-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7211 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535,0,0,65535]
7212 ; AVX2-FCP-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm5
7213 ; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm0, %ymm3, %ymm15
7214 ; AVX2-FCP-NEXT: vmovdqa 352(%rdi), %ymm4
7215 ; AVX2-FCP-NEXT: vmovdqa 320(%rdi), %ymm6
7216 ; AVX2-FCP-NEXT: vpblendvb %ymm1, %ymm6, %ymm4, %ymm1
7217 ; AVX2-FCP-NEXT: vpblendvb %ymm13, %ymm4, %ymm6, %ymm12
7218 ; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm4, %ymm6, %ymm0
7219 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7220 ; AVX2-FCP-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
7221 ; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm10, %ymm0, %ymm8
7222 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7223 ; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm7, %ymm0, %ymm10
7224 ; AVX2-FCP-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,65535,0,0,65535,0,0,65535,65535,0,0,65535,0,0,65535,0]
7225 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7226 ; AVX2-FCP-NEXT: vpblendvb %ymm2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
7227 ; AVX2-FCP-NEXT: vpblendvb %ymm2, %ymm11, %ymm9, %ymm0
7228 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7229 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
7230 ; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm5, %xmm6
7231 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm5, %xmm11
7232 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
7233 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm11, %xmm9
7234 ; AVX2-FCP-NEXT: vpor %xmm6, %xmm9, %xmm6
7235 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
7236 ; AVX2-FCP-NEXT: vpmovsxwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9
7237 ; AVX2-FCP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm3 # 32-byte Folded Reload
7238 ; AVX2-FCP-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7239 ; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm1, %xmm2
7240 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm1, %xmm6
7241 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm6, %xmm0
7242 ; AVX2-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
7243 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7244 ; AVX2-FCP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7245 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7246 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128]
7247 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm2
7248 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13]
7249 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm11, %xmm11
7250 ; AVX2-FCP-NEXT: vpor %xmm2, %xmm11, %xmm2
7251 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
7252 ; AVX2-FCP-NEXT: vpblendvb %ymm9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
7253 ; AVX2-FCP-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7254 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
7255 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm6, %xmm1
7256 ; AVX2-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
7257 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7258 ; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm14, %ymm0, %ymm0
7259 ; AVX2-FCP-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
7260 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm15, %xmm14
7261 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
7262 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm14, %xmm0
7263 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
7264 ; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm15, %xmm1
7265 ; AVX2-FCP-NEXT: vpor %xmm0, %xmm1, %xmm1
7266 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm8, %xmm3
7267 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u]
7268 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm3, %xmm6
7269 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
7270 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm8, %xmm11
7271 ; AVX2-FCP-NEXT: vpor %xmm6, %xmm11, %xmm6
7272 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14]
7273 ; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm0
7274 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm0[5,6,7]
7275 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
7276 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
7277 ; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
7278 ; AVX2-FCP-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7279 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm12, %xmm1
7280 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm0
7281 ; AVX2-FCP-NEXT: vpshufb %xmm2, %xmm12, %xmm2
7282 ; AVX2-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
7283 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm10, %xmm2
7284 ; AVX2-FCP-NEXT: vpshufb %xmm5, %xmm2, %xmm5
7285 ; AVX2-FCP-NEXT: vpshufb %xmm4, %xmm10, %xmm4
7286 ; AVX2-FCP-NEXT: vpor %xmm5, %xmm4, %xmm4
7287 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
7288 ; AVX2-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm5
7289 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
7290 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
7291 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7292 ; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm4, %ymm0, %ymm5
7293 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
7294 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm14, %xmm4
7295 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
7296 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm11
7297 ; AVX2-FCP-NEXT: vpor %xmm4, %xmm11, %xmm4
7298 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm11 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
7299 ; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm3, %xmm3
7300 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
7301 ; AVX2-FCP-NEXT: vpshufb %xmm14, %xmm8, %xmm8
7302 ; AVX2-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
7303 ; AVX2-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15]
7304 ; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm13, %ymm13
7305 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm13[5,6,7]
7306 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm13[4,5,6,7]
7307 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
7308 ; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm3, %ymm4, %ymm4
7309 ; AVX2-FCP-NEXT: vpshufb %xmm0, %xmm1, %xmm0
7310 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm12, %xmm1
7311 ; AVX2-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
7312 ; AVX2-FCP-NEXT: vpshufb %xmm11, %xmm2, %xmm1
7313 ; AVX2-FCP-NEXT: vpshufb %xmm14, %xmm10, %xmm2
7314 ; AVX2-FCP-NEXT: vpor %xmm1, %xmm2, %xmm1
7315 ; AVX2-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm2
7316 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
7317 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7318 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7319 ; AVX2-FCP-NEXT: vpblendvb %ymm9, %ymm1, %ymm0, %ymm3
7320 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
7321 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm0
7322 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
7323 ; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm2
7324 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
7325 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm8
7326 ; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm10
7327 ; AVX2-FCP-NEXT: vpor %xmm2, %xmm8, %xmm2
7328 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
7329 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
7330 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm8[0,1,2],ymm2[3,4,5,6,7],ymm8[8,9,10],ymm2[11,12,13,14,15]
7331 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm2[4,5,6,7]
7332 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
7333 ; AVX2-FCP-NEXT: vextracti128 $1, %ymm9, %xmm8
7334 ; AVX2-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
7335 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm7
7336 ; AVX2-FCP-NEXT: vmovdqa %ymm9, %ymm11
7337 ; AVX2-FCP-NEXT: vpor %xmm1, %xmm7, %xmm1
7338 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
7339 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
7340 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm7[0,1,2],ymm1[3,4,5,6,7],ymm7[8,9,10],ymm1[11,12,13,14,15]
7341 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
7342 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
7343 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm0, %xmm0
7344 ; AVX2-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
7345 ; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm10, %xmm10
7346 ; AVX2-FCP-NEXT: vpor %xmm0, %xmm10, %xmm0
7347 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7348 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
7349 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
7350 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
7351 ; AVX2-FCP-NEXT: vpshufb %xmm7, %xmm8, %xmm7
7352 ; AVX2-FCP-NEXT: vpshufb %xmm9, %xmm11, %xmm8
7353 ; AVX2-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
7354 ; AVX2-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
7355 ; AVX2-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
7356 ; AVX2-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
7357 ; AVX2-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
7358 ; AVX2-FCP-NEXT: vmovdqa %ymm1, 32(%rsi)
7359 ; AVX2-FCP-NEXT: vmovdqa %ymm2, (%rsi)
7360 ; AVX2-FCP-NEXT: vmovdqa %ymm7, 32(%rdx)
7361 ; AVX2-FCP-NEXT: vmovdqa %ymm0, (%rdx)
7362 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7363 ; AVX2-FCP-NEXT: vmovaps %ymm0, 32(%rcx)
7364 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7365 ; AVX2-FCP-NEXT: vmovaps %ymm0, (%rcx)
7366 ; AVX2-FCP-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
7367 ; AVX2-FCP-NEXT: vmovaps %ymm0, 32(%r8)
7368 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7369 ; AVX2-FCP-NEXT: vmovaps %ymm0, (%r8)
7370 ; AVX2-FCP-NEXT: vmovdqa %ymm5, 32(%r9)
7371 ; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7372 ; AVX2-FCP-NEXT: vmovaps %ymm0, (%r9)
7373 ; AVX2-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
7374 ; AVX2-FCP-NEXT: vmovdqa %ymm3, 32(%rax)
7375 ; AVX2-FCP-NEXT: vmovdqa %ymm4, (%rax)
7376 ; AVX2-FCP-NEXT: addq $328, %rsp # imm = 0x148
7377 ; AVX2-FCP-NEXT: vzeroupper
7378 ; AVX2-FCP-NEXT: retq
7380 ; AVX512-LABEL: load_i8_stride6_vf64:
7381 ; AVX512: # %bb.0:
7382 ; AVX512-NEXT: subq $40, %rsp
7383 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
7384 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
7385 ; AVX512-NEXT: vmovdqa64 224(%rdi), %ymm25
7386 ; AVX512-NEXT: vmovdqa64 192(%rdi), %ymm26
7387 ; AVX512-NEXT: vmovdqa %ymm12, %ymm0
7388 ; AVX512-NEXT: vpternlogq $202, %ymm25, %ymm26, %ymm0
7389 ; AVX512-NEXT: vpshufb %xmm1, %xmm0, %xmm3
7390 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
7391 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm4
7392 ; AVX512-NEXT: vpshufb %xmm5, %xmm4, %xmm6
7393 ; AVX512-NEXT: vpor %xmm3, %xmm6, %xmm9
7394 ; AVX512-NEXT: vmovdqa64 (%rdi), %ymm30
7395 ; AVX512-NEXT: vmovdqa64 32(%rdi), %ymm31
7396 ; AVX512-NEXT: vmovdqa64 128(%rdi), %ymm24
7397 ; AVX512-NEXT: vmovdqa64 160(%rdi), %ymm18
7398 ; AVX512-NEXT: vmovdqa %ymm12, %ymm6
7399 ; AVX512-NEXT: vpternlogq $202, %ymm24, %ymm18, %ymm6
7400 ; AVX512-NEXT: vextracti128 $1, %ymm6, %xmm7
7401 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
7402 ; AVX512-NEXT: vpshufb %xmm3, %xmm7, %xmm10
7403 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
7404 ; AVX512-NEXT: vpshufb %xmm8, %xmm6, %xmm13
7405 ; AVX512-NEXT: vpor %xmm10, %xmm13, %xmm10
7406 ; AVX512-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
7407 ; AVX512-NEXT: vinserti32x4 $2, %xmm9, %zmm10, %zmm2
7408 ; AVX512-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
7409 ; AVX512-NEXT: vmovdqa %ymm12, %ymm9
7410 ; AVX512-NEXT: vpternlogq $202, %ymm31, %ymm30, %ymm9
7411 ; AVX512-NEXT: vpshufb %xmm1, %xmm9, %xmm1
7412 ; AVX512-NEXT: vextracti128 $1, %ymm9, %xmm13
7413 ; AVX512-NEXT: vpshufb %xmm5, %xmm13, %xmm5
7414 ; AVX512-NEXT: vporq %xmm1, %xmm5, %xmm17
7415 ; AVX512-NEXT: vmovdqa64 320(%rdi), %ymm29
7416 ; AVX512-NEXT: vmovdqa64 352(%rdi), %ymm22
7417 ; AVX512-NEXT: vmovdqa %ymm12, %ymm1
7418 ; AVX512-NEXT: vpternlogq $202, %ymm29, %ymm22, %ymm1
7419 ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm5
7420 ; AVX512-NEXT: vpshufb %xmm3, %xmm5, %xmm3
7421 ; AVX512-NEXT: vpshufb %xmm8, %xmm1, %xmm8
7422 ; AVX512-NEXT: vpor %xmm3, %xmm8, %xmm3
7423 ; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
7424 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
7425 ; AVX512-NEXT: vpshufb %xmm8, %xmm0, %xmm0
7426 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
7427 ; AVX512-NEXT: vpshufb %xmm10, %xmm4, %xmm4
7428 ; AVX512-NEXT: vpor %xmm0, %xmm4, %xmm0
7429 ; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7430 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
7431 ; AVX512-NEXT: vpshufb %xmm0, %xmm7, %xmm4
7432 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
7433 ; AVX512-NEXT: vpshufb %xmm7, %xmm6, %xmm6
7434 ; AVX512-NEXT: vporq %xmm4, %xmm6, %xmm28
7435 ; AVX512-NEXT: vpshufb %xmm8, %xmm9, %xmm4
7436 ; AVX512-NEXT: vpshufb %xmm10, %xmm13, %xmm6
7437 ; AVX512-NEXT: vporq %xmm4, %xmm6, %xmm21
7438 ; AVX512-NEXT: vpshufb %xmm0, %xmm5, %xmm0
7439 ; AVX512-NEXT: vpshufb %xmm7, %xmm1, %xmm1
7440 ; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm27
7441 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
7442 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
7443 ; AVX512-NEXT: vmovdqa %ymm9, %ymm4
7444 ; AVX512-NEXT: vpternlogq $202, %ymm26, %ymm25, %ymm4
7445 ; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm15
7446 ; AVX512-NEXT: vpshufb %xmm0, %xmm15, %xmm1
7447 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
7448 ; AVX512-NEXT: vpshufb %xmm6, %xmm4, %xmm5
7449 ; AVX512-NEXT: vpor %xmm1, %xmm5, %xmm1
7450 ; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7451 ; AVX512-NEXT: vmovdqa %ymm12, %ymm5
7452 ; AVX512-NEXT: vpternlogq $202, %ymm18, %ymm24, %ymm5
7453 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
7454 ; AVX512-NEXT: vpshufb %xmm8, %xmm5, %xmm7
7455 ; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm1
7456 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
7457 ; AVX512-NEXT: vpshufb %xmm10, %xmm1, %xmm13
7458 ; AVX512-NEXT: vpor %xmm7, %xmm13, %xmm2
7459 ; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7460 ; AVX512-NEXT: vmovdqa %ymm9, %ymm13
7461 ; AVX512-NEXT: vpternlogq $202, %ymm30, %ymm31, %ymm13
7462 ; AVX512-NEXT: vextracti128 $1, %ymm13, %xmm14
7463 ; AVX512-NEXT: vpshufb %xmm0, %xmm14, %xmm0
7464 ; AVX512-NEXT: vpshufb %xmm6, %xmm13, %xmm6
7465 ; AVX512-NEXT: vporq %xmm0, %xmm6, %xmm16
7466 ; AVX512-NEXT: vmovdqa %ymm12, %ymm11
7467 ; AVX512-NEXT: vpternlogq $202, %ymm22, %ymm29, %ymm11
7468 ; AVX512-NEXT: vpshufb %xmm8, %xmm11, %xmm8
7469 ; AVX512-NEXT: vextracti128 $1, %ymm11, %xmm7
7470 ; AVX512-NEXT: vpshufb %xmm10, %xmm7, %xmm10
7471 ; AVX512-NEXT: vpor %xmm8, %xmm10, %xmm0
7472 ; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7473 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
7474 ; AVX512-NEXT: vpshufb %xmm10, %xmm15, %xmm15
7475 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
7476 ; AVX512-NEXT: vpshufb %xmm8, %xmm4, %xmm4
7477 ; AVX512-NEXT: vpor %xmm4, %xmm15, %xmm0
7478 ; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7479 ; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero
7480 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[1,7,13]
7481 ; AVX512-NEXT: vpor %xmm1, %xmm15, %xmm0
7482 ; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7483 ; AVX512-NEXT: vmovdqa 256(%rdi), %ymm1
7484 ; AVX512-NEXT: vshufi64x2 {{.*#+}} ymm19 = ymm1[2,3],mem[2,3]
7485 ; AVX512-NEXT: vinserti32x4 $1, 288(%rdi), %ymm1, %ymm20
7486 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
7487 ; AVX512-NEXT: vmovdqa %ymm5, %ymm1
7488 ; AVX512-NEXT: vpternlogq $202, %ymm19, %ymm20, %ymm1
7489 ; AVX512-NEXT: vpshufb {{.*#+}} ymm15 = ymm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
7490 ; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm15[0,1,2],ymm3[3,4,5,6,7],ymm15[8,9,10],ymm3[11,12,13,14,15]
7491 ; AVX512-NEXT: vpblendd {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm3[4,5,6,7]
7492 ; AVX512-NEXT: vmovdqa64 64(%rdi), %ymm23
7493 ; AVX512-NEXT: vshufi64x2 {{.*#+}} ymm6 = ymm23[2,3],mem[2,3]
7494 ; AVX512-NEXT: vinserti32x4 $1, 96(%rdi), %ymm23, %ymm23
7495 ; AVX512-NEXT: vinserti64x4 $1, %ymm15, %zmm0, %zmm15
7496 ; AVX512-NEXT: vmovdqa %ymm5, %ymm2
7497 ; AVX512-NEXT: vpternlogq $202, %ymm6, %ymm23, %ymm2
7498 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
7499 ; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7500 ; AVX512-NEXT: vpternlogq $248, %ymm4, %ymm17, %ymm0
7501 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
7502 ; AVX512-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm0 # 64-byte Folded Reload
7503 ; AVX512-NEXT: vpmovsxdq {{.*#+}} zmm3 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,16777215,0,0]
7504 ; AVX512-NEXT: vpternlogq $184, %zmm0, %zmm3, %zmm15
7505 ; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
7506 ; AVX512-NEXT: vinserti32x4 $1, %xmm27, %ymm0, %ymm1
7507 ; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
7508 ; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
7509 ; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7510 ; AVX512-NEXT: vpternlogq $248, %ymm4, %ymm21, %ymm1
7511 ; AVX512-NEXT: vinserti32x4 $1, %xmm28, %ymm0, %ymm2
7512 ; AVX512-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
7513 ; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm17, %zmm1
7514 ; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm17
7515 ; AVX512-NEXT: vpternlogq $184, %zmm1, %zmm3, %zmm17
7516 ; AVX512-NEXT: vpshufb %xmm10, %xmm14, %xmm0
7517 ; AVX512-NEXT: vpshufb %xmm8, %xmm13, %xmm1
7518 ; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm21
7519 ; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm11[u,u,u,u,u,1,7,13],zero,zero,zero,xmm11[5,11],zero,zero,zero
7520 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm7[u,u,u,u,u],zero,zero,zero,xmm7[3,9,15],zero,zero,xmm7[1,7,13]
7521 ; AVX512-NEXT: vporq %xmm0, %xmm1, %xmm28
7522 ; AVX512-NEXT: vmovdqa64 %ymm25, %ymm11
7523 ; AVX512-NEXT: vpternlogq $226, %ymm26, %ymm12, %ymm11
7524 ; AVX512-NEXT: vextracti128 $1, %ymm11, %xmm0
7525 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
7526 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
7527 ; AVX512-NEXT: vpshufb %xmm3, %xmm11, %xmm2
7528 ; AVX512-NEXT: vmovdqa64 %xmm3, %xmm25
7529 ; AVX512-NEXT: vporq %xmm1, %xmm2, %xmm26
7530 ; AVX512-NEXT: vmovdqa64 %ymm18, %ymm14
7531 ; AVX512-NEXT: vpternlogq $226, %ymm24, %ymm9, %ymm14
7532 ; AVX512-NEXT: vextracti128 $1, %ymm14, %xmm10
7533 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
7534 ; AVX512-NEXT: vpshufb %xmm1, %xmm10, %xmm2
7535 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
7536 ; AVX512-NEXT: vpshufb %xmm3, %xmm14, %xmm4
7537 ; AVX512-NEXT: vporq %xmm2, %xmm4, %xmm27
7538 ; AVX512-NEXT: vpternlogq $202, %ymm30, %ymm31, %ymm12
7539 ; AVX512-NEXT: vmovdqa %ymm5, %ymm4
7540 ; AVX512-NEXT: vpternlogq $202, %ymm23, %ymm6, %ymm4
7541 ; AVX512-NEXT: vpternlogq $202, %ymm29, %ymm22, %ymm9
7542 ; AVX512-NEXT: vextracti128 $1, %ymm9, %xmm8
7543 ; AVX512-NEXT: vpshufb %xmm1, %xmm8, %xmm1
7544 ; AVX512-NEXT: vpshufb %xmm3, %xmm9, %xmm2
7545 ; AVX512-NEXT: vpor %xmm1, %xmm2, %xmm7
7546 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
7547 ; AVX512-NEXT: vpshufb %xmm1, %xmm0, %xmm0
7548 ; AVX512-NEXT: vmovdqa64 %xmm1, %xmm22
7549 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm13 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
7550 ; AVX512-NEXT: vpshufb %xmm13, %xmm11, %xmm1
7551 ; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm3
7552 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
7553 ; AVX512-NEXT: vpshufb %xmm2, %xmm10, %xmm1
7554 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
7555 ; AVX512-NEXT: vpshufb %xmm0, %xmm14, %xmm10
7556 ; AVX512-NEXT: vpor %xmm1, %xmm10, %xmm10
7557 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,4,10,0,6,12,18,24,30,20,26,128,128,128,128,128,128,128,128,128,128,128]
7558 ; AVX512-NEXT: vpshufb %ymm1, %ymm4, %ymm11
7559 ; AVX512-NEXT: vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
7560 ; AVX512-NEXT: vpternlogq $236, %ymm18, %ymm11, %ymm16
7561 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,5,11,1,7,13,19,25,31,21,27,128,128,128,128,128,128,128,128,128,128,128]
7562 ; AVX512-NEXT: vpshufb %ymm11, %ymm4, %ymm4
7563 ; AVX512-NEXT: vpternlogq $236, %ymm18, %ymm4, %ymm21
7564 ; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload
7565 ; AVX512-NEXT: vpternlogq $202, %ymm20, %ymm19, %ymm5
7566 ; AVX512-NEXT: vpshufb %ymm1, %ymm5, %ymm1
7567 ; AVX512-NEXT: vpternlogq $248, %ymm18, %ymm4, %ymm1
7568 ; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm4
7569 ; AVX512-NEXT: vpshufb %ymm11, %ymm5, %ymm5
7570 ; AVX512-NEXT: vextracti128 $1, %ymm12, %xmm1
7571 ; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[4,10,u,u,u,u,u,u]
7572 ; AVX512-NEXT: vmovdqa64 %xmm25, %xmm14
7573 ; AVX512-NEXT: vpshufb %xmm14, %xmm12, %xmm14
7574 ; AVX512-NEXT: vpor %xmm11, %xmm14, %xmm11
7575 ; AVX512-NEXT: vpshufb %xmm2, %xmm8, %xmm2
7576 ; AVX512-NEXT: vpshufb %xmm0, %xmm9, %xmm0
7577 ; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0
7578 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
7579 ; AVX512-NEXT: vpternlogq $226, %ymm23, %ymm2, %ymm6
7580 ; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
7581 ; AVX512-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3,4],xmm8[5,6,7]
7582 ; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
7583 ; AVX512-NEXT: vinserti32x4 $1, %xmm28, %ymm0, %ymm9
7584 ; AVX512-NEXT: vpternlogq $248, %ymm18, %ymm9, %ymm5
7585 ; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
7586 ; AVX512-NEXT: vpternlogq $202, %ymm20, %ymm19, %ymm2
7587 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
7588 ; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
7589 ; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7590 ; AVX512-NEXT: vpternlogq $242, %ymm7, %ymm9, %ymm11
7591 ; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm7
7592 ; AVX512-NEXT: vinserti32x4 $1, %xmm27, %ymm0, %ymm11
7593 ; AVX512-NEXT: vinserti32x4 $2, %xmm26, %zmm11, %zmm11
7594 ; AVX512-NEXT: vpternlogq $226, %zmm11, %zmm9, %zmm8
7595 ; AVX512-NEXT: vpmovsxdq {{.*#+}} zmm11 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
7596 ; AVX512-NEXT: vpternlogq $184, %zmm8, %zmm11, %zmm7
7597 ; AVX512-NEXT: vmovdqa64 %xmm22, %xmm8
7598 ; AVX512-NEXT: vpshufb %xmm8, %xmm1, %xmm1
7599 ; AVX512-NEXT: vpshufb %xmm13, %xmm12, %xmm8
7600 ; AVX512-NEXT: vpor %xmm1, %xmm8, %xmm1
7601 ; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
7602 ; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
7603 ; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
7604 ; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7605 ; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7606 ; AVX512-NEXT: vpternlogq $242, %ymm0, %ymm9, %ymm2
7607 ; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
7608 ; AVX512-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm2
7609 ; AVX512-NEXT: vinserti32x4 $2, %xmm3, %zmm2, %zmm2
7610 ; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm9, %zmm1
7611 ; AVX512-NEXT: vpternlogq $184, %zmm1, %zmm11, %zmm0
7612 ; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
7613 ; AVX512-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
7614 ; AVX512-NEXT: vpmovsxwd {{.*#+}} zmm2 = [0,0,0,0,0,4294967040,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295]
7615 ; AVX512-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm16
7616 ; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
7617 ; AVX512-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
7618 ; AVX512-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm21
7619 ; AVX512-NEXT: vpmovsxdq {{.*#+}} zmm1 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,16777215,0,0]
7620 ; AVX512-NEXT: vpternlogq $184, %zmm16, %zmm1, %zmm4
7621 ; AVX512-NEXT: vpternlogq $184, %zmm21, %zmm1, %zmm5
7622 ; AVX512-NEXT: vmovdqa64 %zmm15, (%rsi)
7623 ; AVX512-NEXT: vmovdqa64 %zmm17, (%rdx)
7624 ; AVX512-NEXT: vmovdqa64 %zmm4, (%rcx)
7625 ; AVX512-NEXT: vmovdqa64 %zmm5, (%r8)
7626 ; AVX512-NEXT: vmovdqa64 %zmm7, (%r9)
7627 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
7628 ; AVX512-NEXT: vmovdqa64 %zmm0, (%rax)
7629 ; AVX512-NEXT: addq $40, %rsp
7630 ; AVX512-NEXT: vzeroupper
7631 ; AVX512-NEXT: retq
7633 ; AVX512-FCP-LABEL: load_i8_stride6_vf64:
7634 ; AVX512-FCP: # %bb.0:
7635 ; AVX512-FCP-NEXT: subq $40, %rsp
7636 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
7637 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
7638 ; AVX512-FCP-NEXT: vmovdqa64 224(%rdi), %ymm25
7639 ; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm26
7640 ; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm0
7641 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm25, %ymm26, %ymm0
7642 ; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm3
7643 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
7644 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm4
7645 ; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm6
7646 ; AVX512-FCP-NEXT: vpor %xmm3, %xmm6, %xmm9
7647 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %ymm30
7648 ; AVX512-FCP-NEXT: vmovdqa64 32(%rdi), %ymm31
7649 ; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %ymm24
7650 ; AVX512-FCP-NEXT: vmovdqa64 160(%rdi), %ymm18
7651 ; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm6
7652 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm18, %ymm6
7653 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
7654 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
7655 ; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm10
7656 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
7657 ; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm13
7658 ; AVX512-FCP-NEXT: vpor %xmm10, %xmm13, %xmm10
7659 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
7660 ; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm9, %zmm10, %zmm2
7661 ; AVX512-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
7662 ; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm9
7663 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm31, %ymm30, %ymm9
7664 ; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm1
7665 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm13
7666 ; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm13, %xmm5
7667 ; AVX512-FCP-NEXT: vporq %xmm1, %xmm5, %xmm17
7668 ; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %ymm29
7669 ; AVX512-FCP-NEXT: vmovdqa64 352(%rdi), %ymm22
7670 ; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm1
7671 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm29, %ymm22, %ymm1
7672 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm5
7673 ; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm3
7674 ; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm8
7675 ; AVX512-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
7676 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
7677 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
7678 ; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm0
7679 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
7680 ; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm4
7681 ; AVX512-FCP-NEXT: vpor %xmm0, %xmm4, %xmm0
7682 ; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7683 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
7684 ; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm4
7685 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
7686 ; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm6
7687 ; AVX512-FCP-NEXT: vporq %xmm4, %xmm6, %xmm28
7688 ; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm9, %xmm4
7689 ; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm13, %xmm6
7690 ; AVX512-FCP-NEXT: vporq %xmm4, %xmm6, %xmm21
7691 ; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm0
7692 ; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm1
7693 ; AVX512-FCP-NEXT: vporq %xmm0, %xmm1, %xmm27
7694 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
7695 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
7696 ; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm4
7697 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm26, %ymm25, %ymm4
7698 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm15
7699 ; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm1
7700 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
7701 ; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm4, %xmm5
7702 ; AVX512-FCP-NEXT: vpor %xmm1, %xmm5, %xmm1
7703 ; AVX512-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7704 ; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm5
7705 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm18, %ymm24, %ymm5
7706 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
7707 ; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm7
7708 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm1
7709 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
7710 ; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm13
7711 ; AVX512-FCP-NEXT: vpor %xmm7, %xmm13, %xmm2
7712 ; AVX512-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7713 ; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm13
7714 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm30, %ymm31, %ymm13
7715 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
7716 ; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm14, %xmm0
7717 ; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm13, %xmm6
7718 ; AVX512-FCP-NEXT: vporq %xmm0, %xmm6, %xmm16
7719 ; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm11
7720 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm22, %ymm29, %ymm11
7721 ; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm8
7722 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm7
7723 ; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm10
7724 ; AVX512-FCP-NEXT: vpor %xmm8, %xmm10, %xmm0
7725 ; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7726 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
7727 ; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm15, %xmm15
7728 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
7729 ; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm4
7730 ; AVX512-FCP-NEXT: vpor %xmm4, %xmm15, %xmm0
7731 ; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7732 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero
7733 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[1,7,13]
7734 ; AVX512-FCP-NEXT: vpor %xmm1, %xmm15, %xmm0
7735 ; AVX512-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7736 ; AVX512-FCP-NEXT: vmovdqa 256(%rdi), %ymm1
7737 ; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} ymm19 = ymm1[2,3],mem[2,3]
7738 ; AVX512-FCP-NEXT: vinserti32x4 $1, 288(%rdi), %ymm1, %ymm20
7739 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
7740 ; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm1
7741 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm19, %ymm20, %ymm1
7742 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
7743 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm15[0,1,2],ymm3[3,4,5,6,7],ymm15[8,9,10],ymm3[11,12,13,14,15]
7744 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm3[4,5,6,7]
7745 ; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %ymm23
7746 ; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} ymm6 = ymm23[2,3],mem[2,3]
7747 ; AVX512-FCP-NEXT: vinserti32x4 $1, 96(%rdi), %ymm23, %ymm23
7748 ; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm0, %zmm15
7749 ; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm2
7750 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm6, %ymm23, %ymm2
7751 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
7752 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7753 ; AVX512-FCP-NEXT: vpternlogq $248, %ymm4, %ymm17, %ymm0
7754 ; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
7755 ; AVX512-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm0 # 64-byte Folded Reload
7756 ; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} zmm3 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,16777215,0,0]
7757 ; AVX512-FCP-NEXT: vpternlogq $184, %zmm0, %zmm3, %zmm15
7758 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
7759 ; AVX512-FCP-NEXT: vinserti32x4 $1, %xmm27, %ymm0, %ymm1
7760 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
7761 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
7762 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7763 ; AVX512-FCP-NEXT: vpternlogq $248, %ymm4, %ymm21, %ymm1
7764 ; AVX512-FCP-NEXT: vinserti32x4 $1, %xmm28, %ymm0, %ymm2
7765 ; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
7766 ; AVX512-FCP-NEXT: vpternlogq $226, %zmm2, %zmm17, %zmm1
7767 ; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm17
7768 ; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm3, %zmm17
7769 ; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm14, %xmm0
7770 ; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm1
7771 ; AVX512-FCP-NEXT: vporq %xmm0, %xmm1, %xmm21
7772 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm11[u,u,u,u,u,1,7,13],zero,zero,zero,xmm11[5,11],zero,zero,zero
7773 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm7[u,u,u,u,u],zero,zero,zero,xmm7[3,9,15],zero,zero,xmm7[1,7,13]
7774 ; AVX512-FCP-NEXT: vporq %xmm0, %xmm1, %xmm28
7775 ; AVX512-FCP-NEXT: vmovdqa64 %ymm25, %ymm11
7776 ; AVX512-FCP-NEXT: vpternlogq $226, %ymm26, %ymm12, %ymm11
7777 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm0
7778 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
7779 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
7780 ; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm11, %xmm2
7781 ; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm25
7782 ; AVX512-FCP-NEXT: vporq %xmm1, %xmm2, %xmm26
7783 ; AVX512-FCP-NEXT: vmovdqa64 %ymm18, %ymm14
7784 ; AVX512-FCP-NEXT: vpternlogq $226, %ymm24, %ymm9, %ymm14
7785 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm14, %xmm10
7786 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
7787 ; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm10, %xmm2
7788 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
7789 ; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm14, %xmm4
7790 ; AVX512-FCP-NEXT: vporq %xmm2, %xmm4, %xmm27
7791 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm30, %ymm31, %ymm12
7792 ; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm4
7793 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm23, %ymm6, %ymm4
7794 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm29, %ymm22, %ymm9
7795 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm8
7796 ; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
7797 ; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm2
7798 ; AVX512-FCP-NEXT: vpor %xmm1, %xmm2, %xmm7
7799 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
7800 ; AVX512-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
7801 ; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm22
7802 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
7803 ; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm11, %xmm1
7804 ; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm3
7805 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
7806 ; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm10, %xmm1
7807 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
7808 ; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm14, %xmm10
7809 ; AVX512-FCP-NEXT: vpor %xmm1, %xmm10, %xmm10
7810 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,4,10,0,6,12,18,24,30,20,26,128,128,128,128,128,128,128,128,128,128,128]
7811 ; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm11
7812 ; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
7813 ; AVX512-FCP-NEXT: vpternlogq $236, %ymm18, %ymm11, %ymm16
7814 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,5,11,1,7,13,19,25,31,21,27,128,128,128,128,128,128,128,128,128,128,128]
7815 ; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm4, %ymm4
7816 ; AVX512-FCP-NEXT: vpternlogq $236, %ymm18, %ymm4, %ymm21
7817 ; AVX512-FCP-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload
7818 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm20, %ymm19, %ymm5
7819 ; AVX512-FCP-NEXT: vpshufb %ymm1, %ymm5, %ymm1
7820 ; AVX512-FCP-NEXT: vpternlogq $248, %ymm18, %ymm4, %ymm1
7821 ; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm4
7822 ; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
7823 ; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm1
7824 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[4,10,u,u,u,u,u,u]
7825 ; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm14
7826 ; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm12, %xmm14
7827 ; AVX512-FCP-NEXT: vpor %xmm11, %xmm14, %xmm11
7828 ; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm8, %xmm2
7829 ; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm0
7830 ; AVX512-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
7831 ; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
7832 ; AVX512-FCP-NEXT: vpternlogq $226, %ymm23, %ymm2, %ymm6
7833 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
7834 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3,4],xmm8[5,6,7]
7835 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
7836 ; AVX512-FCP-NEXT: vinserti32x4 $1, %xmm28, %ymm0, %ymm9
7837 ; AVX512-FCP-NEXT: vpternlogq $248, %ymm18, %ymm9, %ymm5
7838 ; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
7839 ; AVX512-FCP-NEXT: vpternlogq $202, %ymm20, %ymm19, %ymm2
7840 ; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
7841 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
7842 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7843 ; AVX512-FCP-NEXT: vpternlogq $242, %ymm7, %ymm9, %ymm11
7844 ; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm7
7845 ; AVX512-FCP-NEXT: vinserti32x4 $1, %xmm27, %ymm0, %ymm11
7846 ; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm26, %zmm11, %zmm11
7847 ; AVX512-FCP-NEXT: vpternlogq $226, %zmm11, %zmm9, %zmm8
7848 ; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} zmm11 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
7849 ; AVX512-FCP-NEXT: vpternlogq $184, %zmm8, %zmm11, %zmm7
7850 ; AVX512-FCP-NEXT: vmovdqa64 %xmm22, %xmm8
7851 ; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm1
7852 ; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm12, %xmm8
7853 ; AVX512-FCP-NEXT: vpor %xmm1, %xmm8, %xmm1
7854 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
7855 ; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
7856 ; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
7857 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7858 ; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7859 ; AVX512-FCP-NEXT: vpternlogq $242, %ymm0, %ymm9, %ymm2
7860 ; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
7861 ; AVX512-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm2
7862 ; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm2, %zmm2
7863 ; AVX512-FCP-NEXT: vpternlogq $226, %zmm2, %zmm9, %zmm1
7864 ; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm11, %zmm0
7865 ; AVX512-FCP-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
7866 ; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
7867 ; AVX512-FCP-NEXT: vpmovsxwd {{.*#+}} zmm2 = [0,0,0,0,0,4294967040,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295]
7868 ; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm16
7869 ; AVX512-FCP-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
7870 ; AVX512-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
7871 ; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm21
7872 ; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} zmm1 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,16777215,0,0]
7873 ; AVX512-FCP-NEXT: vpternlogq $184, %zmm16, %zmm1, %zmm4
7874 ; AVX512-FCP-NEXT: vpternlogq $184, %zmm21, %zmm1, %zmm5
7875 ; AVX512-FCP-NEXT: vmovdqa64 %zmm15, (%rsi)
7876 ; AVX512-FCP-NEXT: vmovdqa64 %zmm17, (%rdx)
7877 ; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%rcx)
7878 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%r8)
7879 ; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%r9)
7880 ; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
7881 ; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
7882 ; AVX512-FCP-NEXT: addq $40, %rsp
7883 ; AVX512-FCP-NEXT: vzeroupper
7884 ; AVX512-FCP-NEXT: retq
7886 ; AVX512DQ-LABEL: load_i8_stride6_vf64:
7887 ; AVX512DQ: # %bb.0:
7888 ; AVX512DQ-NEXT: subq $40, %rsp
7889 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
7890 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
7891 ; AVX512DQ-NEXT: vmovdqa64 224(%rdi), %ymm25
7892 ; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %ymm26
7893 ; AVX512DQ-NEXT: vmovdqa %ymm12, %ymm0
7894 ; AVX512DQ-NEXT: vpternlogq $202, %ymm25, %ymm26, %ymm0
7895 ; AVX512DQ-NEXT: vpshufb %xmm1, %xmm0, %xmm3
7896 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
7897 ; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm4
7898 ; AVX512DQ-NEXT: vpshufb %xmm5, %xmm4, %xmm6
7899 ; AVX512DQ-NEXT: vpor %xmm3, %xmm6, %xmm9
7900 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %ymm30
7901 ; AVX512DQ-NEXT: vmovdqa64 32(%rdi), %ymm31
7902 ; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %ymm24
7903 ; AVX512DQ-NEXT: vmovdqa64 160(%rdi), %ymm18
7904 ; AVX512DQ-NEXT: vmovdqa %ymm12, %ymm6
7905 ; AVX512DQ-NEXT: vpternlogq $202, %ymm24, %ymm18, %ymm6
7906 ; AVX512DQ-NEXT: vextracti128 $1, %ymm6, %xmm7
7907 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm3 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
7908 ; AVX512DQ-NEXT: vpshufb %xmm3, %xmm7, %xmm10
7909 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
7910 ; AVX512DQ-NEXT: vpshufb %xmm8, %xmm6, %xmm13
7911 ; AVX512DQ-NEXT: vpor %xmm10, %xmm13, %xmm10
7912 ; AVX512DQ-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
7913 ; AVX512DQ-NEXT: vinserti32x4 $2, %xmm9, %zmm10, %zmm2
7914 ; AVX512DQ-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
7915 ; AVX512DQ-NEXT: vmovdqa %ymm12, %ymm9
7916 ; AVX512DQ-NEXT: vpternlogq $202, %ymm31, %ymm30, %ymm9
7917 ; AVX512DQ-NEXT: vpshufb %xmm1, %xmm9, %xmm1
7918 ; AVX512DQ-NEXT: vextracti128 $1, %ymm9, %xmm13
7919 ; AVX512DQ-NEXT: vpshufb %xmm5, %xmm13, %xmm5
7920 ; AVX512DQ-NEXT: vporq %xmm1, %xmm5, %xmm17
7921 ; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %ymm29
7922 ; AVX512DQ-NEXT: vmovdqa64 352(%rdi), %ymm22
7923 ; AVX512DQ-NEXT: vmovdqa %ymm12, %ymm1
7924 ; AVX512DQ-NEXT: vpternlogq $202, %ymm29, %ymm22, %ymm1
7925 ; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm5
7926 ; AVX512DQ-NEXT: vpshufb %xmm3, %xmm5, %xmm3
7927 ; AVX512DQ-NEXT: vpshufb %xmm8, %xmm1, %xmm8
7928 ; AVX512DQ-NEXT: vpor %xmm3, %xmm8, %xmm3
7929 ; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
7930 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
7931 ; AVX512DQ-NEXT: vpshufb %xmm8, %xmm0, %xmm0
7932 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
7933 ; AVX512DQ-NEXT: vpshufb %xmm10, %xmm4, %xmm4
7934 ; AVX512DQ-NEXT: vpor %xmm0, %xmm4, %xmm0
7935 ; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7936 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
7937 ; AVX512DQ-NEXT: vpshufb %xmm0, %xmm7, %xmm4
7938 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
7939 ; AVX512DQ-NEXT: vpshufb %xmm7, %xmm6, %xmm6
7940 ; AVX512DQ-NEXT: vporq %xmm4, %xmm6, %xmm28
7941 ; AVX512DQ-NEXT: vpshufb %xmm8, %xmm9, %xmm4
7942 ; AVX512DQ-NEXT: vpshufb %xmm10, %xmm13, %xmm6
7943 ; AVX512DQ-NEXT: vporq %xmm4, %xmm6, %xmm21
7944 ; AVX512DQ-NEXT: vpshufb %xmm0, %xmm5, %xmm0
7945 ; AVX512DQ-NEXT: vpshufb %xmm7, %xmm1, %xmm1
7946 ; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm27
7947 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
7948 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
7949 ; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm4
7950 ; AVX512DQ-NEXT: vpternlogq $202, %ymm26, %ymm25, %ymm4
7951 ; AVX512DQ-NEXT: vextracti128 $1, %ymm4, %xmm15
7952 ; AVX512DQ-NEXT: vpshufb %xmm0, %xmm15, %xmm1
7953 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
7954 ; AVX512DQ-NEXT: vpshufb %xmm6, %xmm4, %xmm5
7955 ; AVX512DQ-NEXT: vpor %xmm1, %xmm5, %xmm1
7956 ; AVX512DQ-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7957 ; AVX512DQ-NEXT: vmovdqa %ymm12, %ymm5
7958 ; AVX512DQ-NEXT: vpternlogq $202, %ymm18, %ymm24, %ymm5
7959 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
7960 ; AVX512DQ-NEXT: vpshufb %xmm8, %xmm5, %xmm7
7961 ; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm1
7962 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
7963 ; AVX512DQ-NEXT: vpshufb %xmm10, %xmm1, %xmm13
7964 ; AVX512DQ-NEXT: vpor %xmm7, %xmm13, %xmm2
7965 ; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7966 ; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm13
7967 ; AVX512DQ-NEXT: vpternlogq $202, %ymm30, %ymm31, %ymm13
7968 ; AVX512DQ-NEXT: vextracti128 $1, %ymm13, %xmm14
7969 ; AVX512DQ-NEXT: vpshufb %xmm0, %xmm14, %xmm0
7970 ; AVX512DQ-NEXT: vpshufb %xmm6, %xmm13, %xmm6
7971 ; AVX512DQ-NEXT: vporq %xmm0, %xmm6, %xmm16
7972 ; AVX512DQ-NEXT: vmovdqa %ymm12, %ymm11
7973 ; AVX512DQ-NEXT: vpternlogq $202, %ymm22, %ymm29, %ymm11
7974 ; AVX512DQ-NEXT: vpshufb %xmm8, %xmm11, %xmm8
7975 ; AVX512DQ-NEXT: vextracti128 $1, %ymm11, %xmm7
7976 ; AVX512DQ-NEXT: vpshufb %xmm10, %xmm7, %xmm10
7977 ; AVX512DQ-NEXT: vpor %xmm8, %xmm10, %xmm0
7978 ; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7979 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
7980 ; AVX512DQ-NEXT: vpshufb %xmm10, %xmm15, %xmm15
7981 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
7982 ; AVX512DQ-NEXT: vpshufb %xmm8, %xmm4, %xmm4
7983 ; AVX512DQ-NEXT: vpor %xmm4, %xmm15, %xmm0
7984 ; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7985 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero
7986 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[1,7,13]
7987 ; AVX512DQ-NEXT: vpor %xmm1, %xmm15, %xmm0
7988 ; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7989 ; AVX512DQ-NEXT: vmovdqa 256(%rdi), %ymm1
7990 ; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} ymm19 = ymm1[2,3],mem[2,3]
7991 ; AVX512DQ-NEXT: vinserti32x4 $1, 288(%rdi), %ymm1, %ymm20
7992 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
7993 ; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm1
7994 ; AVX512DQ-NEXT: vpternlogq $202, %ymm19, %ymm20, %ymm1
7995 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm15 = ymm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
7996 ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm15[0,1,2],ymm3[3,4,5,6,7],ymm15[8,9,10],ymm3[11,12,13,14,15]
7997 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm3[4,5,6,7]
7998 ; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %ymm23
7999 ; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} ymm6 = ymm23[2,3],mem[2,3]
8000 ; AVX512DQ-NEXT: vinserti32x4 $1, 96(%rdi), %ymm23, %ymm23
8001 ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm15, %zmm0, %zmm15
8002 ; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm2
8003 ; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm23, %ymm2
8004 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
8005 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
8006 ; AVX512DQ-NEXT: vpternlogq $248, %ymm4, %ymm17, %ymm0
8007 ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
8008 ; AVX512DQ-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm0 # 64-byte Folded Reload
8009 ; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} zmm3 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,16777215,0,0]
8010 ; AVX512DQ-NEXT: vpternlogq $184, %zmm0, %zmm3, %zmm15
8011 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
8012 ; AVX512DQ-NEXT: vinserti32x4 $1, %xmm27, %ymm0, %ymm1
8013 ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
8014 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
8015 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
8016 ; AVX512DQ-NEXT: vpternlogq $248, %ymm4, %ymm21, %ymm1
8017 ; AVX512DQ-NEXT: vinserti32x4 $1, %xmm28, %ymm0, %ymm2
8018 ; AVX512DQ-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
8019 ; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm17, %zmm1
8020 ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm17
8021 ; AVX512DQ-NEXT: vpternlogq $184, %zmm1, %zmm3, %zmm17
8022 ; AVX512DQ-NEXT: vpshufb %xmm10, %xmm14, %xmm0
8023 ; AVX512DQ-NEXT: vpshufb %xmm8, %xmm13, %xmm1
8024 ; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm21
8025 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm11[u,u,u,u,u,1,7,13],zero,zero,zero,xmm11[5,11],zero,zero,zero
8026 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm7[u,u,u,u,u],zero,zero,zero,xmm7[3,9,15],zero,zero,xmm7[1,7,13]
8027 ; AVX512DQ-NEXT: vporq %xmm0, %xmm1, %xmm28
8028 ; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm11
8029 ; AVX512DQ-NEXT: vpternlogq $226, %ymm26, %ymm12, %ymm11
8030 ; AVX512DQ-NEXT: vextracti128 $1, %ymm11, %xmm0
8031 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
8032 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
8033 ; AVX512DQ-NEXT: vpshufb %xmm3, %xmm11, %xmm2
8034 ; AVX512DQ-NEXT: vmovdqa64 %xmm3, %xmm25
8035 ; AVX512DQ-NEXT: vporq %xmm1, %xmm2, %xmm26
8036 ; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm14
8037 ; AVX512DQ-NEXT: vpternlogq $226, %ymm24, %ymm9, %ymm14
8038 ; AVX512DQ-NEXT: vextracti128 $1, %ymm14, %xmm10
8039 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
8040 ; AVX512DQ-NEXT: vpshufb %xmm1, %xmm10, %xmm2
8041 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm3 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
8042 ; AVX512DQ-NEXT: vpshufb %xmm3, %xmm14, %xmm4
8043 ; AVX512DQ-NEXT: vporq %xmm2, %xmm4, %xmm27
8044 ; AVX512DQ-NEXT: vpternlogq $202, %ymm30, %ymm31, %ymm12
8045 ; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm4
8046 ; AVX512DQ-NEXT: vpternlogq $202, %ymm23, %ymm6, %ymm4
8047 ; AVX512DQ-NEXT: vpternlogq $202, %ymm29, %ymm22, %ymm9
8048 ; AVX512DQ-NEXT: vextracti128 $1, %ymm9, %xmm8
8049 ; AVX512DQ-NEXT: vpshufb %xmm1, %xmm8, %xmm1
8050 ; AVX512DQ-NEXT: vpshufb %xmm3, %xmm9, %xmm2
8051 ; AVX512DQ-NEXT: vpor %xmm1, %xmm2, %xmm7
8052 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
8053 ; AVX512DQ-NEXT: vpshufb %xmm1, %xmm0, %xmm0
8054 ; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm22
8055 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm13 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
8056 ; AVX512DQ-NEXT: vpshufb %xmm13, %xmm11, %xmm1
8057 ; AVX512DQ-NEXT: vpor %xmm0, %xmm1, %xmm3
8058 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
8059 ; AVX512DQ-NEXT: vpshufb %xmm2, %xmm10, %xmm1
8060 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
8061 ; AVX512DQ-NEXT: vpshufb %xmm0, %xmm14, %xmm10
8062 ; AVX512DQ-NEXT: vpor %xmm1, %xmm10, %xmm10
8063 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,4,10,0,6,12,18,24,30,20,26,128,128,128,128,128,128,128,128,128,128,128]
8064 ; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm11
8065 ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
8066 ; AVX512DQ-NEXT: vpternlogq $236, %ymm18, %ymm11, %ymm16
8067 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,5,11,1,7,13,19,25,31,21,27,128,128,128,128,128,128,128,128,128,128,128]
8068 ; AVX512DQ-NEXT: vpshufb %ymm11, %ymm4, %ymm4
8069 ; AVX512DQ-NEXT: vpternlogq $236, %ymm18, %ymm4, %ymm21
8070 ; AVX512DQ-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload
8071 ; AVX512DQ-NEXT: vpternlogq $202, %ymm20, %ymm19, %ymm5
8072 ; AVX512DQ-NEXT: vpshufb %ymm1, %ymm5, %ymm1
8073 ; AVX512DQ-NEXT: vpternlogq $248, %ymm18, %ymm4, %ymm1
8074 ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm4
8075 ; AVX512DQ-NEXT: vpshufb %ymm11, %ymm5, %ymm5
8076 ; AVX512DQ-NEXT: vextracti128 $1, %ymm12, %xmm1
8077 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[4,10,u,u,u,u,u,u]
8078 ; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm14
8079 ; AVX512DQ-NEXT: vpshufb %xmm14, %xmm12, %xmm14
8080 ; AVX512DQ-NEXT: vpor %xmm11, %xmm14, %xmm11
8081 ; AVX512DQ-NEXT: vpshufb %xmm2, %xmm8, %xmm2
8082 ; AVX512DQ-NEXT: vpshufb %xmm0, %xmm9, %xmm0
8083 ; AVX512DQ-NEXT: vpor %xmm2, %xmm0, %xmm0
8084 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
8085 ; AVX512DQ-NEXT: vpternlogq $226, %ymm23, %ymm2, %ymm6
8086 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
8087 ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3,4],xmm8[5,6,7]
8088 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
8089 ; AVX512DQ-NEXT: vinserti32x4 $1, %xmm28, %ymm0, %ymm9
8090 ; AVX512DQ-NEXT: vpternlogq $248, %ymm18, %ymm9, %ymm5
8091 ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
8092 ; AVX512DQ-NEXT: vpternlogq $202, %ymm20, %ymm19, %ymm2
8093 ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
8094 ; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
8095 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
8096 ; AVX512DQ-NEXT: vpternlogq $242, %ymm7, %ymm9, %ymm11
8097 ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm7
8098 ; AVX512DQ-NEXT: vinserti32x4 $1, %xmm27, %ymm0, %ymm11
8099 ; AVX512DQ-NEXT: vinserti32x4 $2, %xmm26, %zmm11, %zmm11
8100 ; AVX512DQ-NEXT: vpternlogq $226, %zmm11, %zmm9, %zmm8
8101 ; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} zmm11 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
8102 ; AVX512DQ-NEXT: vpternlogq $184, %zmm8, %zmm11, %zmm7
8103 ; AVX512DQ-NEXT: vmovdqa64 %xmm22, %xmm8
8104 ; AVX512DQ-NEXT: vpshufb %xmm8, %xmm1, %xmm1
8105 ; AVX512DQ-NEXT: vpshufb %xmm13, %xmm12, %xmm8
8106 ; AVX512DQ-NEXT: vpor %xmm1, %xmm8, %xmm1
8107 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
8108 ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
8109 ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
8110 ; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8111 ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
8112 ; AVX512DQ-NEXT: vpternlogq $242, %ymm0, %ymm9, %ymm2
8113 ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
8114 ; AVX512DQ-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm2
8115 ; AVX512DQ-NEXT: vinserti32x4 $2, %xmm3, %zmm2, %zmm2
8116 ; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm9, %zmm1
8117 ; AVX512DQ-NEXT: vpternlogq $184, %zmm1, %zmm11, %zmm0
8118 ; AVX512DQ-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
8119 ; AVX512DQ-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
8120 ; AVX512DQ-NEXT: vpmovsxwd {{.*#+}} zmm2 = [0,0,0,0,0,4294967040,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295]
8121 ; AVX512DQ-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm16
8122 ; AVX512DQ-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
8123 ; AVX512DQ-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
8124 ; AVX512DQ-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm21
8125 ; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} zmm1 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,16777215,0,0]
8126 ; AVX512DQ-NEXT: vpternlogq $184, %zmm16, %zmm1, %zmm4
8127 ; AVX512DQ-NEXT: vpternlogq $184, %zmm21, %zmm1, %zmm5
8128 ; AVX512DQ-NEXT: vmovdqa64 %zmm15, (%rsi)
8129 ; AVX512DQ-NEXT: vmovdqa64 %zmm17, (%rdx)
8130 ; AVX512DQ-NEXT: vmovdqa64 %zmm4, (%rcx)
8131 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, (%r8)
8132 ; AVX512DQ-NEXT: vmovdqa64 %zmm7, (%r9)
8133 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
8134 ; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%rax)
8135 ; AVX512DQ-NEXT: addq $40, %rsp
8136 ; AVX512DQ-NEXT: vzeroupper
8137 ; AVX512DQ-NEXT: retq
;
8139 ; AVX512DQ-FCP-LABEL: load_i8_stride6_vf64:
8140 ; AVX512DQ-FCP: # %bb.0:
8141 ; AVX512DQ-FCP-NEXT: subq $40, %rsp
8142 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
8143 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
8144 ; AVX512DQ-FCP-NEXT: vmovdqa64 224(%rdi), %ymm25
8145 ; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm26
8146 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm0
8147 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm25, %ymm26, %ymm0
8148 ; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm3
8149 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
8150 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm4
8151 ; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm6
8152 ; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm6, %xmm9
8153 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %ymm30
8154 ; AVX512DQ-FCP-NEXT: vmovdqa64 32(%rdi), %ymm31
8155 ; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %ymm24
8156 ; AVX512DQ-FCP-NEXT: vmovdqa64 160(%rdi), %ymm18
8157 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm6
8158 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm18, %ymm6
8159 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
8160 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
8161 ; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm10
8162 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
8163 ; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm6, %xmm13
8164 ; AVX512DQ-FCP-NEXT: vpor %xmm10, %xmm13, %xmm10
8165 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
8166 ; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm9, %zmm10, %zmm2
8167 ; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
8168 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm9
8169 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm31, %ymm30, %ymm9
8170 ; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm9, %xmm1
8171 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm9, %xmm13
8172 ; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm13, %xmm5
8173 ; AVX512DQ-FCP-NEXT: vporq %xmm1, %xmm5, %xmm17
8174 ; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %ymm29
8175 ; AVX512DQ-FCP-NEXT: vmovdqa64 352(%rdi), %ymm22
8176 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm1
8177 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm29, %ymm22, %ymm1
8178 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm5
8179 ; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm3
8180 ; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm8
8181 ; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
8182 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
8183 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
8184 ; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm0
8185 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
8186 ; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm4, %xmm4
8187 ; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm4, %xmm0
8188 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8189 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
8190 ; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm7, %xmm4
8191 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
8192 ; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm6
8193 ; AVX512DQ-FCP-NEXT: vporq %xmm4, %xmm6, %xmm28
8194 ; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm9, %xmm4
8195 ; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm13, %xmm6
8196 ; AVX512DQ-FCP-NEXT: vporq %xmm4, %xmm6, %xmm21
8197 ; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm5, %xmm0
8198 ; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm1, %xmm1
8199 ; AVX512DQ-FCP-NEXT: vporq %xmm0, %xmm1, %xmm27
8200 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
8201 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
8202 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm4
8203 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm26, %ymm25, %ymm4
8204 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm15
8205 ; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm15, %xmm1
8206 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
8207 ; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm4, %xmm5
8208 ; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm5, %xmm1
8209 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8210 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm5
8211 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm18, %ymm24, %ymm5
8212 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
8213 ; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm7
8214 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm1
8215 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
8216 ; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm1, %xmm13
8217 ; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm13, %xmm2
8218 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8219 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm13
8220 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm30, %ymm31, %ymm13
8221 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm14
8222 ; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm14, %xmm0
8223 ; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm13, %xmm6
8224 ; AVX512DQ-FCP-NEXT: vporq %xmm0, %xmm6, %xmm16
8225 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm11
8226 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm22, %ymm29, %ymm11
8227 ; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm8
8228 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm7
8229 ; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm7, %xmm10
8230 ; AVX512DQ-FCP-NEXT: vpor %xmm8, %xmm10, %xmm0
8231 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8232 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
8233 ; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm15, %xmm15
8234 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
8235 ; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm4
8236 ; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm15, %xmm0
8237 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8238 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero
8239 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[1,7,13]
8240 ; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm15, %xmm0
8241 ; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8242 ; AVX512DQ-FCP-NEXT: vmovdqa 256(%rdi), %ymm1
8243 ; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} ymm19 = ymm1[2,3],mem[2,3]
8244 ; AVX512DQ-FCP-NEXT: vinserti32x4 $1, 288(%rdi), %ymm1, %ymm20
8245 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
8246 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm1
8247 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm19, %ymm20, %ymm1
8248 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
8249 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm15[0,1,2],ymm3[3,4,5,6,7],ymm15[8,9,10],ymm3[11,12,13,14,15]
8250 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm3[4,5,6,7]
8251 ; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %ymm23
8252 ; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} ymm6 = ymm23[2,3],mem[2,3]
8253 ; AVX512DQ-FCP-NEXT: vinserti32x4 $1, 96(%rdi), %ymm23, %ymm23
8254 ; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm15, %zmm0, %zmm15
8255 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm2
8256 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm6, %ymm23, %ymm2
8257 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
8258 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
8259 ; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm4, %ymm17, %ymm0
8260 ; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
8261 ; AVX512DQ-FCP-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm0 # 64-byte Folded Reload
8262 ; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} zmm3 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,16777215,0,0]
8263 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm0, %zmm3, %zmm15
8264 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
8265 ; AVX512DQ-FCP-NEXT: vinserti32x4 $1, %xmm27, %ymm0, %ymm1
8266 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
8267 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
8268 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
8269 ; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm4, %ymm21, %ymm1
8270 ; AVX512DQ-FCP-NEXT: vinserti32x4 $1, %xmm28, %ymm0, %ymm2
8271 ; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
8272 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm2, %zmm17, %zmm1
8273 ; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm17
8274 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm3, %zmm17
8275 ; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm14, %xmm0
8276 ; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm1
8277 ; AVX512DQ-FCP-NEXT: vporq %xmm0, %xmm1, %xmm21
8278 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm11[u,u,u,u,u,1,7,13],zero,zero,zero,xmm11[5,11],zero,zero,zero
8279 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm7[u,u,u,u,u],zero,zero,zero,xmm7[3,9,15],zero,zero,xmm7[1,7,13]
8280 ; AVX512DQ-FCP-NEXT: vporq %xmm0, %xmm1, %xmm28
8281 ; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm25, %ymm11
8282 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm26, %ymm12, %ymm11
8283 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm0
8284 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
8285 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
8286 ; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm11, %xmm2
8287 ; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm25
8288 ; AVX512DQ-FCP-NEXT: vporq %xmm1, %xmm2, %xmm26
8289 ; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, %ymm14
8290 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm24, %ymm9, %ymm14
8291 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm10
8292 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
8293 ; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm10, %xmm2
8294 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
8295 ; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm14, %xmm4
8296 ; AVX512DQ-FCP-NEXT: vporq %xmm2, %xmm4, %xmm27
8297 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm30, %ymm31, %ymm12
8298 ; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm4
8299 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm23, %ymm6, %ymm4
8300 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm29, %ymm22, %ymm9
8301 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm9, %xmm8
8302 ; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm8, %xmm1
8303 ; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm9, %xmm2
8304 ; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm2, %xmm7
8305 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
8306 ; AVX512DQ-FCP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
8307 ; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm22
8308 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
8309 ; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm11, %xmm1
8310 ; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm3
8311 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
8312 ; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm10, %xmm1
8313 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm0 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
8314 ; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm14, %xmm10
8315 ; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm10, %xmm10
8316 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,4,10,0,6,12,18,24,30,20,26,128,128,128,128,128,128,128,128,128,128,128]
8317 ; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm4, %ymm11
8318 ; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
8319 ; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm18, %ymm11, %ymm16
8320 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,5,11,1,7,13,19,25,31,21,27,128,128,128,128,128,128,128,128,128,128,128]
8321 ; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm4, %ymm4
8322 ; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm18, %ymm4, %ymm21
8323 ; AVX512DQ-FCP-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload
8324 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm20, %ymm19, %ymm5
8325 ; AVX512DQ-FCP-NEXT: vpshufb %ymm1, %ymm5, %ymm1
8326 ; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm18, %ymm4, %ymm1
8327 ; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm4
8328 ; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
8329 ; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm1
8330 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[4,10,u,u,u,u,u,u]
8331 ; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm25, %xmm14
8332 ; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm12, %xmm14
8333 ; AVX512DQ-FCP-NEXT: vpor %xmm11, %xmm14, %xmm11
8334 ; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm8, %xmm2
8335 ; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm9, %xmm0
8336 ; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
8337 ; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
8338 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm23, %ymm2, %ymm6
8339 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
8340 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3,4],xmm8[5,6,7]
8341 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
8342 ; AVX512DQ-FCP-NEXT: vinserti32x4 $1, %xmm28, %ymm0, %ymm9
8343 ; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm18, %ymm9, %ymm5
8344 ; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
8345 ; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm20, %ymm19, %ymm2
8346 ; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
8347 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
8348 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
8349 ; AVX512DQ-FCP-NEXT: vpternlogq $242, %ymm7, %ymm9, %ymm11
8350 ; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm7
8351 ; AVX512DQ-FCP-NEXT: vinserti32x4 $1, %xmm27, %ymm0, %ymm11
8352 ; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm26, %zmm11, %zmm11
8353 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm11, %zmm9, %zmm8
8354 ; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} zmm11 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,65535,0,0]
8355 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm8, %zmm11, %zmm7
8356 ; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm22, %xmm8
8357 ; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm1, %xmm1
8358 ; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm12, %xmm8
8359 ; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm8, %xmm1
8360 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
8361 ; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
8362 ; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
8363 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8364 ; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
8365 ; AVX512DQ-FCP-NEXT: vpternlogq $242, %ymm0, %ymm9, %ymm2
8366 ; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
8367 ; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm2
8368 ; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm2, %zmm2
8369 ; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm2, %zmm9, %zmm1
8370 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm11, %zmm0
8371 ; AVX512DQ-FCP-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
8372 ; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
8373 ; AVX512DQ-FCP-NEXT: vpmovsxwd {{.*#+}} zmm2 = [0,0,0,0,0,4294967040,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295]
8374 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm16
8375 ; AVX512DQ-FCP-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
8376 ; AVX512DQ-FCP-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
8377 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm21
8378 ; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} zmm1 = [18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,16777215,0,0]
8379 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm16, %zmm1, %zmm4
8380 ; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm21, %zmm1, %zmm5
8381 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, (%rsi)
8382 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, (%rdx)
8383 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, (%rcx)
8384 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, (%r8)
8385 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%r9)
8386 ; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
8387 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
8388 ; AVX512DQ-FCP-NEXT: addq $40, %rsp
8389 ; AVX512DQ-FCP-NEXT: vzeroupper
8390 ; AVX512DQ-FCP-NEXT: retq
;
8392 ; AVX512BW-LABEL: load_i8_stride6_vf64:
8393 ; AVX512BW: # %bb.0:
8394 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
8395 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
8396 ; AVX512BW-NEXT: vmovdqa 224(%rdi), %ymm0
8397 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %ymm23
8398 ; AVX512BW-NEXT: movw $18724, %r10w # imm = 0x4924
8399 ; AVX512BW-NEXT: kmovd %r10d, %k1
8400 ; AVX512BW-NEXT: vpblendmw %ymm0, %ymm23, %ymm9 {%k1}
8401 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm9, %xmm1
8402 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
8403 ; AVX512BW-NEXT: vextracti128 $1, %ymm9, %xmm12
8404 ; AVX512BW-NEXT: vpshufb %xmm4, %xmm12, %xmm3
8405 ; AVX512BW-NEXT: vpor %xmm1, %xmm3, %xmm5
8406 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm10
8407 ; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm3
8408 ; AVX512BW-NEXT: vmovdqa 64(%rdi), %ymm6
8409 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %ymm26
8410 ; AVX512BW-NEXT: vmovdqa 160(%rdi), %ymm1
8411 ; AVX512BW-NEXT: vpblendmw %ymm26, %ymm1, %ymm15 {%k1}
8412 ; AVX512BW-NEXT: vextracti32x4 $1, %ymm15, %xmm16
8413 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
8414 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm16, %xmm11
8415 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
8416 ; AVX512BW-NEXT: vpshufb %xmm18, %xmm15, %xmm13
8417 ; AVX512BW-NEXT: vpor %xmm11, %xmm13, %xmm11
8418 ; AVX512BW-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
8419 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm5, %zmm11, %zmm11
8420 ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3]
8421 ; AVX512BW-NEXT: vinserti128 $1, 96(%rdi), %ymm6, %ymm13
8422 ; AVX512BW-NEXT: movw $-28124, %r10w # imm = 0x9224
8423 ; AVX512BW-NEXT: kmovd %r10d, %k4
8424 ; AVX512BW-NEXT: vpblendmw %ymm5, %ymm13, %ymm19 {%k4}
8425 ; AVX512BW-NEXT: vpblendmw %ymm3, %ymm10, %ymm20 {%k1}
8426 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm20, %xmm2
8427 ; AVX512BW-NEXT: vextracti32x4 $1, %ymm20, %xmm21
8428 ; AVX512BW-NEXT: vpshufb %xmm4, %xmm21, %xmm4
8429 ; AVX512BW-NEXT: vpor %xmm2, %xmm4, %xmm2
8430 ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10]
8431 ; AVX512BW-NEXT: movl $4192256, %r10d # imm = 0x3FF800
8432 ; AVX512BW-NEXT: kmovd %r10d, %k2
8433 ; AVX512BW-NEXT: vpshufb %ymm6, %ymm19, %ymm2 {%k2}
8434 ; AVX512BW-NEXT: vmovdqu16 %zmm11, %zmm2 {%k2}
8435 ; AVX512BW-NEXT: vmovdqa 256(%rdi), %ymm11
8436 ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm11[2,3],mem[2,3]
8437 ; AVX512BW-NEXT: vinserti128 $1, 288(%rdi), %ymm11, %ymm14
8438 ; AVX512BW-NEXT: vpblendmw %ymm4, %ymm14, %ymm22 {%k4}
8439 ; AVX512BW-NEXT: vpshufb %ymm6, %ymm22, %ymm7
8440 ; AVX512BW-NEXT: vmovdqa 320(%rdi), %ymm11
8441 ; AVX512BW-NEXT: vmovdqa 352(%rdi), %ymm6
8442 ; AVX512BW-NEXT: vpblendmw %ymm11, %ymm6, %ymm24 {%k1}
8443 ; AVX512BW-NEXT: vextracti32x4 $1, %ymm24, %xmm25
8444 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm25, %xmm17
8445 ; AVX512BW-NEXT: vpshufb %xmm18, %xmm24, %xmm18
8446 ; AVX512BW-NEXT: vporq %xmm17, %xmm18, %xmm17
8447 ; AVX512BW-NEXT: vinserti32x4 $1, %xmm17, %ymm0, %ymm8
8448 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
8449 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
8450 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
8451 ; AVX512BW-NEXT: movabsq $-8796093022208, %rdi # imm = 0xFFFFF80000000000
8452 ; AVX512BW-NEXT: kmovq %rdi, %k3
8453 ; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm2 {%k3}
8454 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm7 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
8455 ; AVX512BW-NEXT: vpshufb %xmm7, %xmm9, %xmm8
8456 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
8457 ; AVX512BW-NEXT: vpshufb %xmm9, %xmm12, %xmm12
8458 ; AVX512BW-NEXT: vpor %xmm8, %xmm12, %xmm8
8459 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
8460 ; AVX512BW-NEXT: vpshufb %xmm12, %xmm16, %xmm16
8461 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
8462 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm15, %xmm15
8463 ; AVX512BW-NEXT: vporq %xmm16, %xmm15, %xmm15
8464 ; AVX512BW-NEXT: vinserti128 $1, %xmm15, %ymm0, %ymm15
8465 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm8, %zmm15, %zmm8
8466 ; AVX512BW-NEXT: vpshufb %xmm7, %xmm20, %xmm7
8467 ; AVX512BW-NEXT: vpshufb %xmm9, %xmm21, %xmm9
8468 ; AVX512BW-NEXT: vpor %xmm7, %xmm9, %xmm9
8469 ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11]
8470 ; AVX512BW-NEXT: vpshufb %ymm7, %ymm19, %ymm9 {%k2}
8471 ; AVX512BW-NEXT: vmovdqu16 %zmm8, %zmm9 {%k2}
8472 ; AVX512BW-NEXT: vpshufb %ymm7, %ymm22, %ymm7
8473 ; AVX512BW-NEXT: vpshufb %xmm12, %xmm25, %xmm8
8474 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm24, %xmm12
8475 ; AVX512BW-NEXT: vpor %xmm8, %xmm12, %xmm8
8476 ; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
8477 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
8478 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
8479 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
8480 ; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm9 {%k3}
8481 ; AVX512BW-NEXT: vpblendmw %ymm13, %ymm5, %ymm15 {%k4}
8482 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm7 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
8483 ; AVX512BW-NEXT: movw $9362, %di # imm = 0x2492
8484 ; AVX512BW-NEXT: kmovd %edi, %k2
8485 ; AVX512BW-NEXT: vpblendmw %ymm10, %ymm3, %ymm8 {%k2}
8486 ; AVX512BW-NEXT: vextracti32x4 $1, %ymm8, %xmm16
8487 ; AVX512BW-NEXT: vpshufb %xmm7, %xmm16, %xmm12
8488 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
8489 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm8, %xmm18
8490 ; AVX512BW-NEXT: vporq %xmm12, %xmm18, %xmm18
8491 ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm19 = [2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12]
8492 ; AVX512BW-NEXT: movl $2095104, %edi # imm = 0x1FF800
8493 ; AVX512BW-NEXT: kmovd %edi, %k5
8494 ; AVX512BW-NEXT: vpshufb %ymm19, %ymm15, %ymm18 {%k5}
8495 ; AVX512BW-NEXT: vpblendmw %ymm23, %ymm0, %ymm20 {%k2}
8496 ; AVX512BW-NEXT: vextracti32x4 $1, %ymm20, %xmm21
8497 ; AVX512BW-NEXT: vpshufb %xmm7, %xmm21, %xmm7
8498 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm20, %xmm12
8499 ; AVX512BW-NEXT: vpor %xmm7, %xmm12, %xmm7
8500 ; AVX512BW-NEXT: vpblendmw %ymm1, %ymm26, %ymm17 {%k1}
8501 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm22 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
8502 ; AVX512BW-NEXT: vpshufb %xmm22, %xmm17, %xmm12
8503 ; AVX512BW-NEXT: vextracti32x4 $1, %ymm17, %xmm24
8504 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
8505 ; AVX512BW-NEXT: vpshufb %xmm25, %xmm24, %xmm27
8506 ; AVX512BW-NEXT: vporq %xmm12, %xmm27, %xmm12
8507 ; AVX512BW-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
8508 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm7, %zmm12, %zmm12
8509 ; AVX512BW-NEXT: movl $2097151, %edi # imm = 0x1FFFFF
8510 ; AVX512BW-NEXT: kmovq %rdi, %k6
8511 ; AVX512BW-NEXT: vmovdqu8 %zmm18, %zmm12 {%k6}
8512 ; AVX512BW-NEXT: vpblendmw %ymm14, %ymm4, %ymm7 {%k4}
8513 ; AVX512BW-NEXT: vpblendmw %ymm6, %ymm11, %ymm18 {%k1}
8514 ; AVX512BW-NEXT: vpshufb %xmm22, %xmm18, %xmm22
8515 ; AVX512BW-NEXT: vextracti32x4 $1, %ymm18, %xmm27
8516 ; AVX512BW-NEXT: vpshufb %xmm25, %xmm27, %xmm25
8517 ; AVX512BW-NEXT: vporq %xmm22, %xmm25, %xmm22
8518 ; AVX512BW-NEXT: vinserti32x4 $1, %xmm22, %ymm0, %ymm22
8519 ; AVX512BW-NEXT: vpshufb %ymm19, %ymm7, %ymm22 {%k5}
8520 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm22, %zmm0, %zmm19
8521 ; AVX512BW-NEXT: vmovdqu8 %zmm19, %zmm12 {%k3}
8522 ; AVX512BW-NEXT: movw $9289, %di # imm = 0x2449
8523 ; AVX512BW-NEXT: kmovd %edi, %k4
8524 ; AVX512BW-NEXT: vmovdqu16 %ymm14, %ymm4 {%k4}
8525 ; AVX512BW-NEXT: vmovdqu16 %ymm13, %ymm5 {%k4}
8526 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm13 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
8527 ; AVX512BW-NEXT: vpshufb %xmm13, %xmm16, %xmm14
8528 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
8529 ; AVX512BW-NEXT: vpshufb %xmm16, %xmm8, %xmm8
8530 ; AVX512BW-NEXT: vpor %xmm14, %xmm8, %xmm8
8531 ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13]
8532 ; AVX512BW-NEXT: vpshufb %ymm14, %ymm15, %ymm8 {%k5}
8533 ; AVX512BW-NEXT: vpshufb %xmm13, %xmm21, %xmm13
8534 ; AVX512BW-NEXT: vpshufb %xmm16, %xmm20, %xmm15
8535 ; AVX512BW-NEXT: vpor %xmm13, %xmm15, %xmm13
8536 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128]
8537 ; AVX512BW-NEXT: vpshufb %xmm15, %xmm17, %xmm16
8538 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13]
8539 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm24, %xmm19
8540 ; AVX512BW-NEXT: vporq %xmm16, %xmm19, %xmm16
8541 ; AVX512BW-NEXT: vinserti32x4 $1, %xmm16, %ymm0, %ymm16
8542 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm13, %zmm16, %zmm13
8543 ; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm13 {%k6}
8544 ; AVX512BW-NEXT: vpshufb %xmm15, %xmm18, %xmm8
8545 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm27, %xmm15
8546 ; AVX512BW-NEXT: vpor %xmm8, %xmm15, %xmm8
8547 ; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
8548 ; AVX512BW-NEXT: vpshufb %ymm14, %ymm7, %ymm8 {%k5}
8549 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
8550 ; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm13 {%k3}
8551 ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm7 = [4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14]
8552 ; AVX512BW-NEXT: vpshufb %ymm7, %ymm5, %ymm8
8553 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u]
8554 ; AVX512BW-NEXT: vmovdqu16 %ymm10, %ymm3 {%k1}
8555 ; AVX512BW-NEXT: vextracti128 $1, %ymm3, %xmm15
8556 ; AVX512BW-NEXT: vpshufb %xmm14, %xmm15, %xmm10
8557 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
8558 ; AVX512BW-NEXT: vpshufb %xmm16, %xmm3, %xmm17
8559 ; AVX512BW-NEXT: vporq %xmm10, %xmm17, %xmm10
8560 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm8[5,6,7]
8561 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm8[4,5,6,7]
8562 ; AVX512BW-NEXT: vmovdqu16 %ymm23, %ymm0 {%k1}
8563 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm8
8564 ; AVX512BW-NEXT: vpshufb %xmm14, %xmm8, %xmm14
8565 ; AVX512BW-NEXT: vpshufb %xmm16, %xmm0, %xmm16
8566 ; AVX512BW-NEXT: vporq %xmm14, %xmm16, %xmm14
8567 ; AVX512BW-NEXT: vmovdqu16 %ymm26, %ymm1 {%k2}
8568 ; AVX512BW-NEXT: vextracti32x4 $1, %ymm1, %xmm16
8569 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
8570 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm16, %xmm18
8571 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
8572 ; AVX512BW-NEXT: vpshufb %xmm19, %xmm1, %xmm20
8573 ; AVX512BW-NEXT: vporq %xmm18, %xmm20, %xmm18
8574 ; AVX512BW-NEXT: vinserti32x4 $1, %xmm18, %ymm0, %ymm18
8575 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm14, %zmm18, %zmm14
8576 ; AVX512BW-NEXT: movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000
8577 ; AVX512BW-NEXT: kmovq %rdi, %k1
8578 ; AVX512BW-NEXT: vmovdqu8 %zmm14, %zmm10 {%k1}
8579 ; AVX512BW-NEXT: vpshufb %ymm7, %ymm4, %ymm7
8580 ; AVX512BW-NEXT: vmovdqu16 %ymm11, %ymm6 {%k2}
8581 ; AVX512BW-NEXT: vextracti128 $1, %ymm6, %xmm11
8582 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm11, %xmm14
8583 ; AVX512BW-NEXT: vpshufb %xmm19, %xmm6, %xmm17
8584 ; AVX512BW-NEXT: vporq %xmm14, %xmm17, %xmm14
8585 ; AVX512BW-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
8586 ; AVX512BW-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
8587 ; AVX512BW-NEXT: kmovd %edi, %k2
8588 ; AVX512BW-NEXT: vmovdqu8 %ymm14, %ymm7 {%k2}
8589 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
8590 ; AVX512BW-NEXT: vmovdqu16 %zmm7, %zmm10 {%k2}
8591 ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15]
8592 ; AVX512BW-NEXT: vpshufb %ymm7, %ymm5, %ymm5
8593 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
8594 ; AVX512BW-NEXT: vpshufb %xmm14, %xmm15, %xmm15
8595 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
8596 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm3, %xmm3
8597 ; AVX512BW-NEXT: vpor %xmm3, %xmm15, %xmm3
8598 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
8599 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
8600 ; AVX512BW-NEXT: vpshufb %xmm14, %xmm8, %xmm5
8601 ; AVX512BW-NEXT: vpshufb %xmm17, %xmm0, %xmm0
8602 ; AVX512BW-NEXT: vpor %xmm5, %xmm0, %xmm0
8603 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm5 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
8604 ; AVX512BW-NEXT: vpshufb %xmm5, %xmm16, %xmm8
8605 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm14 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
8606 ; AVX512BW-NEXT: vpshufb %xmm14, %xmm1, %xmm1
8607 ; AVX512BW-NEXT: vpor %xmm1, %xmm8, %xmm1
8608 ; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
8609 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
8610 ; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
8611 ; AVX512BW-NEXT: vpshufb %ymm7, %ymm4, %ymm0
8612 ; AVX512BW-NEXT: vpshufb %xmm5, %xmm11, %xmm1
8613 ; AVX512BW-NEXT: vpshufb %xmm14, %xmm6, %xmm4
8614 ; AVX512BW-NEXT: vpor %xmm1, %xmm4, %xmm1
8615 ; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
8616 ; AVX512BW-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2}
8617 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
8618 ; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm3 {%k2}
8619 ; AVX512BW-NEXT: vmovdqa64 %zmm2, (%rsi)
8620 ; AVX512BW-NEXT: vmovdqa64 %zmm9, (%rdx)
8621 ; AVX512BW-NEXT: vmovdqa64 %zmm12, (%rcx)
8622 ; AVX512BW-NEXT: vmovdqa64 %zmm13, (%r8)
8623 ; AVX512BW-NEXT: vmovdqa64 %zmm10, (%r9)
8624 ; AVX512BW-NEXT: vmovdqa64 %zmm3, (%rax)
8625 ; AVX512BW-NEXT: vzeroupper
8626 ; AVX512BW-NEXT: retq
;
8628 ; AVX512BW-FCP-LABEL: load_i8_stride6_vf64:
8629 ; AVX512BW-FCP: # %bb.0:
8630 ; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
8631 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
8632 ; AVX512BW-FCP-NEXT: vmovdqa 224(%rdi), %ymm0
8633 ; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %ymm23
8634 ; AVX512BW-FCP-NEXT: movw $18724, %r10w # imm = 0x4924
8635 ; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
8636 ; AVX512BW-FCP-NEXT: vpblendmw %ymm0, %ymm23, %ymm9 {%k1}
8637 ; AVX512BW-FCP-NEXT: vpshufb %xmm2, %xmm9, %xmm1
8638 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
8639 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm9, %xmm12
8640 ; AVX512BW-FCP-NEXT: vpshufb %xmm4, %xmm12, %xmm3
8641 ; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm3, %xmm5
8642 ; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm10
8643 ; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
8644 ; AVX512BW-FCP-NEXT: vmovdqa 64(%rdi), %ymm6
8645 ; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %ymm26
8646 ; AVX512BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
8647 ; AVX512BW-FCP-NEXT: vpblendmw %ymm26, %ymm1, %ymm15 {%k1}
8648 ; AVX512BW-FCP-NEXT: vextracti32x4 $1, %ymm15, %xmm16
8649 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
8650 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm16, %xmm11
8651 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
8652 ; AVX512BW-FCP-NEXT: vpshufb %xmm18, %xmm15, %xmm13
8653 ; AVX512BW-FCP-NEXT: vpor %xmm11, %xmm13, %xmm11
8654 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
8655 ; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm11, %zmm11
8656 ; AVX512BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3]
8657 ; AVX512BW-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm6, %ymm13
8658 ; AVX512BW-FCP-NEXT: movw $-28124, %r10w # imm = 0x9224
8659 ; AVX512BW-FCP-NEXT: kmovd %r10d, %k4
8660 ; AVX512BW-FCP-NEXT: vpblendmw %ymm5, %ymm13, %ymm19 {%k4}
8661 ; AVX512BW-FCP-NEXT: vpblendmw %ymm3, %ymm10, %ymm20 {%k1}
8662 ; AVX512BW-FCP-NEXT: vpshufb %xmm2, %xmm20, %xmm2
8663 ; AVX512BW-FCP-NEXT: vextracti32x4 $1, %ymm20, %xmm21
8664 ; AVX512BW-FCP-NEXT: vpshufb %xmm4, %xmm21, %xmm4
8665 ; AVX512BW-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
8666 ; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10]
8667 ; AVX512BW-FCP-NEXT: movl $4192256, %r10d # imm = 0x3FF800
8668 ; AVX512BW-FCP-NEXT: kmovd %r10d, %k2
8669 ; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm19, %ymm2 {%k2}
8670 ; AVX512BW-FCP-NEXT: vmovdqu16 %zmm11, %zmm2 {%k2}
8671 ; AVX512BW-FCP-NEXT: vmovdqa 256(%rdi), %ymm11
8672 ; AVX512BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm11[2,3],mem[2,3]
8673 ; AVX512BW-FCP-NEXT: vinserti128 $1, 288(%rdi), %ymm11, %ymm14
8674 ; AVX512BW-FCP-NEXT: vpblendmw %ymm4, %ymm14, %ymm22 {%k4}
8675 ; AVX512BW-FCP-NEXT: vpshufb %ymm6, %ymm22, %ymm7
8676 ; AVX512BW-FCP-NEXT: vmovdqa 320(%rdi), %ymm11
8677 ; AVX512BW-FCP-NEXT: vmovdqa 352(%rdi), %ymm6
8678 ; AVX512BW-FCP-NEXT: vpblendmw %ymm11, %ymm6, %ymm24 {%k1}
8679 ; AVX512BW-FCP-NEXT: vextracti32x4 $1, %ymm24, %xmm25
8680 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm25, %xmm17
8681 ; AVX512BW-FCP-NEXT: vpshufb %xmm18, %xmm24, %xmm18
8682 ; AVX512BW-FCP-NEXT: vporq %xmm17, %xmm18, %xmm17
8683 ; AVX512BW-FCP-NEXT: vinserti32x4 $1, %xmm17, %ymm0, %ymm8
8684 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
8685 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
8686 ; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
8687 ; AVX512BW-FCP-NEXT: movabsq $-8796093022208, %rdi # imm = 0xFFFFF80000000000
8688 ; AVX512BW-FCP-NEXT: kmovq %rdi, %k3
8689 ; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm2 {%k3}
8690 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
8691 ; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm8
8692 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
8693 ; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm12, %xmm12
8694 ; AVX512BW-FCP-NEXT: vpor %xmm8, %xmm12, %xmm8
8695 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
8696 ; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm16, %xmm16
8697 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
8698 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm15, %xmm15
8699 ; AVX512BW-FCP-NEXT: vporq %xmm16, %xmm15, %xmm15
8700 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm15, %ymm0, %ymm15
8701 ; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm8, %zmm15, %zmm8
8702 ; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm20, %xmm7
8703 ; AVX512BW-FCP-NEXT: vpshufb %xmm9, %xmm21, %xmm9
8704 ; AVX512BW-FCP-NEXT: vpor %xmm7, %xmm9, %xmm9
8705 ; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11]
8706 ; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm19, %ymm9 {%k2}
8707 ; AVX512BW-FCP-NEXT: vmovdqu16 %zmm8, %zmm9 {%k2}
8708 ; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm22, %ymm7
8709 ; AVX512BW-FCP-NEXT: vpshufb %xmm12, %xmm25, %xmm8
8710 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm24, %xmm12
8711 ; AVX512BW-FCP-NEXT: vpor %xmm8, %xmm12, %xmm8
8712 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
8713 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
8714 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
8715 ; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
8716 ; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm9 {%k3}
8717 ; AVX512BW-FCP-NEXT: vpblendmw %ymm13, %ymm5, %ymm15 {%k4}
8718 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
8719 ; AVX512BW-FCP-NEXT: movw $9362, %di # imm = 0x2492
8720 ; AVX512BW-FCP-NEXT: kmovd %edi, %k2
8721 ; AVX512BW-FCP-NEXT: vpblendmw %ymm10, %ymm3, %ymm8 {%k2}
8722 ; AVX512BW-FCP-NEXT: vextracti32x4 $1, %ymm8, %xmm16
8723 ; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm16, %xmm12
8724 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
8725 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm8, %xmm18
8726 ; AVX512BW-FCP-NEXT: vporq %xmm12, %xmm18, %xmm18
8727 ; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm19 = [2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12]
8728 ; AVX512BW-FCP-NEXT: movl $2095104, %edi # imm = 0x1FF800
8729 ; AVX512BW-FCP-NEXT: kmovd %edi, %k5
8730 ; AVX512BW-FCP-NEXT: vpshufb %ymm19, %ymm15, %ymm18 {%k5}
8731 ; AVX512BW-FCP-NEXT: vpblendmw %ymm23, %ymm0, %ymm20 {%k2}
8732 ; AVX512BW-FCP-NEXT: vextracti32x4 $1, %ymm20, %xmm21
8733 ; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm21, %xmm7
8734 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm20, %xmm12
8735 ; AVX512BW-FCP-NEXT: vpor %xmm7, %xmm12, %xmm7
8736 ; AVX512BW-FCP-NEXT: vpblendmw %ymm1, %ymm26, %ymm17 {%k1}
8737 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm22 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
8738 ; AVX512BW-FCP-NEXT: vpshufb %xmm22, %xmm17, %xmm12
8739 ; AVX512BW-FCP-NEXT: vextracti32x4 $1, %ymm17, %xmm24
8740 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
8741 ; AVX512BW-FCP-NEXT: vpshufb %xmm25, %xmm24, %xmm27
8742 ; AVX512BW-FCP-NEXT: vporq %xmm12, %xmm27, %xmm12
8743 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
8744 ; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm12, %zmm12
8745 ; AVX512BW-FCP-NEXT: movl $2097151, %edi # imm = 0x1FFFFF
8746 ; AVX512BW-FCP-NEXT: kmovq %rdi, %k6
8747 ; AVX512BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm12 {%k6}
8748 ; AVX512BW-FCP-NEXT: vpblendmw %ymm14, %ymm4, %ymm7 {%k4}
8749 ; AVX512BW-FCP-NEXT: vpblendmw %ymm6, %ymm11, %ymm18 {%k1}
8750 ; AVX512BW-FCP-NEXT: vpshufb %xmm22, %xmm18, %xmm22
8751 ; AVX512BW-FCP-NEXT: vextracti32x4 $1, %ymm18, %xmm27
8752 ; AVX512BW-FCP-NEXT: vpshufb %xmm25, %xmm27, %xmm25
8753 ; AVX512BW-FCP-NEXT: vporq %xmm22, %xmm25, %xmm22
8754 ; AVX512BW-FCP-NEXT: vinserti32x4 $1, %xmm22, %ymm0, %ymm22
8755 ; AVX512BW-FCP-NEXT: vpshufb %ymm19, %ymm7, %ymm22 {%k5}
8756 ; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm0, %zmm19
8757 ; AVX512BW-FCP-NEXT: vmovdqu8 %zmm19, %zmm12 {%k3}
8758 ; AVX512BW-FCP-NEXT: movw $9289, %di # imm = 0x2449
8759 ; AVX512BW-FCP-NEXT: kmovd %edi, %k4
8760 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm14, %ymm4 {%k4}
8761 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm13, %ymm5 {%k4}
8762 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
8763 ; AVX512BW-FCP-NEXT: vpshufb %xmm13, %xmm16, %xmm14
8764 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm16 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
8765 ; AVX512BW-FCP-NEXT: vpshufb %xmm16, %xmm8, %xmm8
8766 ; AVX512BW-FCP-NEXT: vpor %xmm14, %xmm8, %xmm8
8767 ; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm14 = [3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13]
8768 ; AVX512BW-FCP-NEXT: vpshufb %ymm14, %ymm15, %ymm8 {%k5}
8769 ; AVX512BW-FCP-NEXT: vpshufb %xmm13, %xmm21, %xmm13
8770 ; AVX512BW-FCP-NEXT: vpshufb %xmm16, %xmm20, %xmm15
8771 ; AVX512BW-FCP-NEXT: vpor %xmm13, %xmm15, %xmm13
8772 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128]
8773 ; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm17, %xmm16
8774 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13]
8775 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm24, %xmm19
8776 ; AVX512BW-FCP-NEXT: vporq %xmm16, %xmm19, %xmm16
8777 ; AVX512BW-FCP-NEXT: vinserti32x4 $1, %xmm16, %ymm0, %ymm16
8778 ; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm13, %zmm16, %zmm13
8779 ; AVX512BW-FCP-NEXT: vmovdqu8 %zmm8, %zmm13 {%k6}
8780 ; AVX512BW-FCP-NEXT: vpshufb %xmm15, %xmm18, %xmm8
8781 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm27, %xmm15
8782 ; AVX512BW-FCP-NEXT: vpor %xmm8, %xmm15, %xmm8
8783 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
8784 ; AVX512BW-FCP-NEXT: vpshufb %ymm14, %ymm7, %ymm8 {%k5}
8785 ; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
8786 ; AVX512BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm13 {%k3}
8787 ; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14]
8788 ; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm8
8789 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u]
8790 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm10, %ymm3 {%k1}
8791 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm3, %xmm15
8792 ; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm15, %xmm10
8793 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
8794 ; AVX512BW-FCP-NEXT: vpshufb %xmm16, %xmm3, %xmm17
8795 ; AVX512BW-FCP-NEXT: vporq %xmm10, %xmm17, %xmm10
8796 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm8[5,6,7]
8797 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm8[4,5,6,7]
8798 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm23, %ymm0 {%k1}
8799 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm8
8800 ; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm8, %xmm14
8801 ; AVX512BW-FCP-NEXT: vpshufb %xmm16, %xmm0, %xmm16
8802 ; AVX512BW-FCP-NEXT: vporq %xmm14, %xmm16, %xmm14
8803 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm26, %ymm1 {%k2}
8804 ; AVX512BW-FCP-NEXT: vextracti32x4 $1, %ymm1, %xmm16
8805 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
8806 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm16, %xmm18
8807 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
8808 ; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm1, %xmm20
8809 ; AVX512BW-FCP-NEXT: vporq %xmm18, %xmm20, %xmm18
8810 ; AVX512BW-FCP-NEXT: vinserti32x4 $1, %xmm18, %ymm0, %ymm18
8811 ; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm14, %zmm18, %zmm14
8812 ; AVX512BW-FCP-NEXT: movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000
8813 ; AVX512BW-FCP-NEXT: kmovq %rdi, %k1
8814 ; AVX512BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm10 {%k1}
8815 ; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm7
8816 ; AVX512BW-FCP-NEXT: vmovdqu16 %ymm11, %ymm6 {%k2}
8817 ; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm6, %xmm11
8818 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm11, %xmm14
8819 ; AVX512BW-FCP-NEXT: vpshufb %xmm19, %xmm6, %xmm17
8820 ; AVX512BW-FCP-NEXT: vporq %xmm14, %xmm17, %xmm14
8821 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
8822 ; AVX512BW-FCP-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
8823 ; AVX512BW-FCP-NEXT: kmovd %edi, %k2
8824 ; AVX512BW-FCP-NEXT: vmovdqu8 %ymm14, %ymm7 {%k2}
8825 ; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
8826 ; AVX512BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm10 {%k2}
8827 ; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15]
8828 ; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm5
8829 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
8830 ; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm15, %xmm15
8831 ; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
8832 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm3, %xmm3
8833 ; AVX512BW-FCP-NEXT: vpor %xmm3, %xmm15, %xmm3
8834 ; AVX512BW-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
8835 ; AVX512BW-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
8836 ; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm8, %xmm5
8837 ; AVX512BW-FCP-NEXT: vpshufb %xmm17, %xmm0, %xmm0
8838 ; AVX512BW-FCP-NEXT: vpor %xmm5, %xmm0, %xmm0
8839 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
8840 ; AVX512BW-FCP-NEXT: vpshufb %xmm5, %xmm16, %xmm8
8841 ; AVX512BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
8842 ; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm1, %xmm1
8843 ; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm8, %xmm1
8844 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
8845 ; AVX512BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
8846 ; AVX512BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
8847 ; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm0
8848 ; AVX512BW-FCP-NEXT: vpshufb %xmm5, %xmm11, %xmm1
8849 ; AVX512BW-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm4
8850 ; AVX512BW-FCP-NEXT: vpor %xmm1, %xmm4, %xmm1
8851 ; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
8852 ; AVX512BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2}
8853 ; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
8854 ; AVX512BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm3 {%k2}
8855 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, (%rsi)
8856 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, (%rdx)
8857 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, (%rcx)
8858 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, (%r8)
8859 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, (%r9)
8860 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, (%rax)
8861 ; AVX512BW-FCP-NEXT: vzeroupper
8862 ; AVX512BW-FCP-NEXT: retq
;
8864 ; AVX512DQ-BW-LABEL: load_i8_stride6_vf64:
8865 ; AVX512DQ-BW: # %bb.0:
8866 ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
8867 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
8868 ; AVX512DQ-BW-NEXT: vmovdqa 224(%rdi), %ymm0
8869 ; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %ymm23
8870 ; AVX512DQ-BW-NEXT: movw $18724, %r10w # imm = 0x4924
8871 ; AVX512DQ-BW-NEXT: kmovd %r10d, %k1
8872 ; AVX512DQ-BW-NEXT: vpblendmw %ymm0, %ymm23, %ymm9 {%k1}
8873 ; AVX512DQ-BW-NEXT: vpshufb %xmm2, %xmm9, %xmm1
8874 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
8875 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm9, %xmm12
8876 ; AVX512DQ-BW-NEXT: vpshufb %xmm4, %xmm12, %xmm3
8877 ; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm3, %xmm5
8878 ; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm10
8879 ; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %ymm3
8880 ; AVX512DQ-BW-NEXT: vmovdqa 64(%rdi), %ymm6
8881 ; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %ymm26
8882 ; AVX512DQ-BW-NEXT: vmovdqa 160(%rdi), %ymm1
8883 ; AVX512DQ-BW-NEXT: vpblendmw %ymm26, %ymm1, %ymm15 {%k1}
8884 ; AVX512DQ-BW-NEXT: vextracti32x4 $1, %ymm15, %xmm16
8885 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
8886 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm16, %xmm11
8887 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
8888 ; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm15, %xmm13
8889 ; AVX512DQ-BW-NEXT: vpor %xmm11, %xmm13, %xmm11
8890 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
8891 ; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm5, %zmm11, %zmm11
8892 ; AVX512DQ-BW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3]
8893 ; AVX512DQ-BW-NEXT: vinserti128 $1, 96(%rdi), %ymm6, %ymm13
8894 ; AVX512DQ-BW-NEXT: movw $-28124, %r10w # imm = 0x9224
8895 ; AVX512DQ-BW-NEXT: kmovd %r10d, %k4
8896 ; AVX512DQ-BW-NEXT: vpblendmw %ymm5, %ymm13, %ymm19 {%k4}
8897 ; AVX512DQ-BW-NEXT: vpblendmw %ymm3, %ymm10, %ymm20 {%k1}
8898 ; AVX512DQ-BW-NEXT: vpshufb %xmm2, %xmm20, %xmm2
8899 ; AVX512DQ-BW-NEXT: vextracti32x4 $1, %ymm20, %xmm21
8900 ; AVX512DQ-BW-NEXT: vpshufb %xmm4, %xmm21, %xmm4
8901 ; AVX512DQ-BW-NEXT: vpor %xmm2, %xmm4, %xmm2
8902 ; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10]
8903 ; AVX512DQ-BW-NEXT: movl $4192256, %r10d # imm = 0x3FF800
8904 ; AVX512DQ-BW-NEXT: kmovd %r10d, %k2
8905 ; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm19, %ymm2 {%k2}
8906 ; AVX512DQ-BW-NEXT: vmovdqu16 %zmm11, %zmm2 {%k2}
8907 ; AVX512DQ-BW-NEXT: vmovdqa 256(%rdi), %ymm11
8908 ; AVX512DQ-BW-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm11[2,3],mem[2,3]
8909 ; AVX512DQ-BW-NEXT: vinserti128 $1, 288(%rdi), %ymm11, %ymm14
8910 ; AVX512DQ-BW-NEXT: vpblendmw %ymm4, %ymm14, %ymm22 {%k4}
8911 ; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm22, %ymm7
8912 ; AVX512DQ-BW-NEXT: vmovdqa 320(%rdi), %ymm11
8913 ; AVX512DQ-BW-NEXT: vmovdqa 352(%rdi), %ymm6
8914 ; AVX512DQ-BW-NEXT: vpblendmw %ymm11, %ymm6, %ymm24 {%k1}
8915 ; AVX512DQ-BW-NEXT: vextracti32x4 $1, %ymm24, %xmm25
8916 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm25, %xmm17
8917 ; AVX512DQ-BW-NEXT: vpshufb %xmm18, %xmm24, %xmm18
8918 ; AVX512DQ-BW-NEXT: vporq %xmm17, %xmm18, %xmm17
8919 ; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm17, %ymm0, %ymm8
8920 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
8921 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
8922 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
8923 ; AVX512DQ-BW-NEXT: movabsq $-8796093022208, %rdi # imm = 0xFFFFF80000000000
8924 ; AVX512DQ-BW-NEXT: kmovq %rdi, %k3
8925 ; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm2 {%k3}
8926 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm7 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
8927 ; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm9, %xmm8
8928 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
8929 ; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm12, %xmm12
8930 ; AVX512DQ-BW-NEXT: vpor %xmm8, %xmm12, %xmm8
8931 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
8932 ; AVX512DQ-BW-NEXT: vpshufb %xmm12, %xmm16, %xmm16
8933 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
8934 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm15, %xmm15
8935 ; AVX512DQ-BW-NEXT: vporq %xmm16, %xmm15, %xmm15
8936 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm15, %ymm0, %ymm15
8937 ; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm8, %zmm15, %zmm8
8938 ; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm20, %xmm7
8939 ; AVX512DQ-BW-NEXT: vpshufb %xmm9, %xmm21, %xmm9
8940 ; AVX512DQ-BW-NEXT: vpor %xmm7, %xmm9, %xmm9
8941 ; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11]
8942 ; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm19, %ymm9 {%k2}
8943 ; AVX512DQ-BW-NEXT: vmovdqu16 %zmm8, %zmm9 {%k2}
8944 ; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm22, %ymm7
8945 ; AVX512DQ-BW-NEXT: vpshufb %xmm12, %xmm25, %xmm8
8946 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm24, %xmm12
8947 ; AVX512DQ-BW-NEXT: vpor %xmm8, %xmm12, %xmm8
8948 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
8949 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
8950 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
8951 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
8952 ; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm9 {%k3}
8953 ; AVX512DQ-BW-NEXT: vpblendmw %ymm13, %ymm5, %ymm15 {%k4}
8954 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm7 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
8955 ; AVX512DQ-BW-NEXT: movw $9362, %di # imm = 0x2492
8956 ; AVX512DQ-BW-NEXT: kmovd %edi, %k2
8957 ; AVX512DQ-BW-NEXT: vpblendmw %ymm10, %ymm3, %ymm8 {%k2}
8958 ; AVX512DQ-BW-NEXT: vextracti32x4 $1, %ymm8, %xmm16
8959 ; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm16, %xmm12
8960 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
8961 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm8, %xmm18
8962 ; AVX512DQ-BW-NEXT: vporq %xmm12, %xmm18, %xmm18
8963 ; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm19 = [2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12]
8964 ; AVX512DQ-BW-NEXT: movl $2095104, %edi # imm = 0x1FF800
8965 ; AVX512DQ-BW-NEXT: kmovd %edi, %k5
8966 ; AVX512DQ-BW-NEXT: vpshufb %ymm19, %ymm15, %ymm18 {%k5}
8967 ; AVX512DQ-BW-NEXT: vpblendmw %ymm23, %ymm0, %ymm20 {%k2}
8968 ; AVX512DQ-BW-NEXT: vextracti32x4 $1, %ymm20, %xmm21
8969 ; AVX512DQ-BW-NEXT: vpshufb %xmm7, %xmm21, %xmm7
8970 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm20, %xmm12
8971 ; AVX512DQ-BW-NEXT: vpor %xmm7, %xmm12, %xmm7
8972 ; AVX512DQ-BW-NEXT: vpblendmw %ymm1, %ymm26, %ymm17 {%k1}
8973 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm22 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
8974 ; AVX512DQ-BW-NEXT: vpshufb %xmm22, %xmm17, %xmm12
8975 ; AVX512DQ-BW-NEXT: vextracti32x4 $1, %ymm17, %xmm24
8976 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
8977 ; AVX512DQ-BW-NEXT: vpshufb %xmm25, %xmm24, %xmm27
8978 ; AVX512DQ-BW-NEXT: vporq %xmm12, %xmm27, %xmm12
8979 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
8980 ; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm7, %zmm12, %zmm12
8981 ; AVX512DQ-BW-NEXT: movl $2097151, %edi # imm = 0x1FFFFF
8982 ; AVX512DQ-BW-NEXT: kmovq %rdi, %k6
8983 ; AVX512DQ-BW-NEXT: vmovdqu8 %zmm18, %zmm12 {%k6}
8984 ; AVX512DQ-BW-NEXT: vpblendmw %ymm14, %ymm4, %ymm7 {%k4}
8985 ; AVX512DQ-BW-NEXT: vpblendmw %ymm6, %ymm11, %ymm18 {%k1}
8986 ; AVX512DQ-BW-NEXT: vpshufb %xmm22, %xmm18, %xmm22
8987 ; AVX512DQ-BW-NEXT: vextracti32x4 $1, %ymm18, %xmm27
8988 ; AVX512DQ-BW-NEXT: vpshufb %xmm25, %xmm27, %xmm25
8989 ; AVX512DQ-BW-NEXT: vporq %xmm22, %xmm25, %xmm22
8990 ; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm22, %ymm0, %ymm22
8991 ; AVX512DQ-BW-NEXT: vpshufb %ymm19, %ymm7, %ymm22 {%k5}
8992 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm22, %zmm0, %zmm19
8993 ; AVX512DQ-BW-NEXT: vmovdqu8 %zmm19, %zmm12 {%k3}
8994 ; AVX512DQ-BW-NEXT: movw $9289, %di # imm = 0x2449
8995 ; AVX512DQ-BW-NEXT: kmovd %edi, %k4
8996 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm14, %ymm4 {%k4}
8997 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm13, %ymm5 {%k4}
8998 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm13 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
8999 ; AVX512DQ-BW-NEXT: vpshufb %xmm13, %xmm16, %xmm14
9000 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
9001 ; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm8, %xmm8
9002 ; AVX512DQ-BW-NEXT: vpor %xmm14, %xmm8, %xmm8
9003 ; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13]
9004 ; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm15, %ymm8 {%k5}
9005 ; AVX512DQ-BW-NEXT: vpshufb %xmm13, %xmm21, %xmm13
9006 ; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm20, %xmm15
9007 ; AVX512DQ-BW-NEXT: vpor %xmm13, %xmm15, %xmm13
9008 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128]
9009 ; AVX512DQ-BW-NEXT: vpshufb %xmm15, %xmm17, %xmm16
9010 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13]
9011 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm24, %xmm19
9012 ; AVX512DQ-BW-NEXT: vporq %xmm16, %xmm19, %xmm16
9013 ; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm16, %ymm0, %ymm16
9014 ; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm13, %zmm16, %zmm13
9015 ; AVX512DQ-BW-NEXT: vmovdqu8 %zmm8, %zmm13 {%k6}
9016 ; AVX512DQ-BW-NEXT: vpshufb %xmm15, %xmm18, %xmm8
9017 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm27, %xmm15
9018 ; AVX512DQ-BW-NEXT: vpor %xmm8, %xmm15, %xmm8
9019 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
9020 ; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm7, %ymm8 {%k5}
9021 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
9022 ; AVX512DQ-BW-NEXT: vmovdqu8 %zmm7, %zmm13 {%k3}
9023 ; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm7 = [4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14]
9024 ; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm5, %ymm8
9025 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u]
9026 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm10, %ymm3 {%k1}
9027 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm3, %xmm15
9028 ; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm15, %xmm10
9029 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
9030 ; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm3, %xmm17
9031 ; AVX512DQ-BW-NEXT: vporq %xmm10, %xmm17, %xmm10
9032 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm8[5,6,7]
9033 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm8[4,5,6,7]
9034 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm23, %ymm0 {%k1}
9035 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm0, %xmm8
9036 ; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm8, %xmm14
9037 ; AVX512DQ-BW-NEXT: vpshufb %xmm16, %xmm0, %xmm16
9038 ; AVX512DQ-BW-NEXT: vporq %xmm14, %xmm16, %xmm14
9039 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm26, %ymm1 {%k2}
9040 ; AVX512DQ-BW-NEXT: vextracti32x4 $1, %ymm1, %xmm16
9041 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
9042 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm16, %xmm18
9043 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
9044 ; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm1, %xmm20
9045 ; AVX512DQ-BW-NEXT: vporq %xmm18, %xmm20, %xmm18
9046 ; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm18, %ymm0, %ymm18
9047 ; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm14, %zmm18, %zmm14
9048 ; AVX512DQ-BW-NEXT: movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000
9049 ; AVX512DQ-BW-NEXT: kmovq %rdi, %k1
9050 ; AVX512DQ-BW-NEXT: vmovdqu8 %zmm14, %zmm10 {%k1}
9051 ; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm4, %ymm7
9052 ; AVX512DQ-BW-NEXT: vmovdqu16 %ymm11, %ymm6 {%k2}
9053 ; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm6, %xmm11
9054 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm11, %xmm14
9055 ; AVX512DQ-BW-NEXT: vpshufb %xmm19, %xmm6, %xmm17
9056 ; AVX512DQ-BW-NEXT: vporq %xmm14, %xmm17, %xmm14
9057 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
9058 ; AVX512DQ-BW-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
9059 ; AVX512DQ-BW-NEXT: kmovd %edi, %k2
9060 ; AVX512DQ-BW-NEXT: vmovdqu8 %ymm14, %ymm7 {%k2}
9061 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
9062 ; AVX512DQ-BW-NEXT: vmovdqu16 %zmm7, %zmm10 {%k2}
9063 ; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15]
9064 ; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm5, %ymm5
9065 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
9066 ; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm15, %xmm15
9067 ; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
9068 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm3, %xmm3
9069 ; AVX512DQ-BW-NEXT: vpor %xmm3, %xmm15, %xmm3
9070 ; AVX512DQ-BW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
9071 ; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
9072 ; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm8, %xmm5
9073 ; AVX512DQ-BW-NEXT: vpshufb %xmm17, %xmm0, %xmm0
9074 ; AVX512DQ-BW-NEXT: vpor %xmm5, %xmm0, %xmm0
9075 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm5 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
9076 ; AVX512DQ-BW-NEXT: vpshufb %xmm5, %xmm16, %xmm8
9077 ; AVX512DQ-BW-NEXT: vmovdqa {{.*#+}} xmm14 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
9078 ; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm1, %xmm1
9079 ; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm8, %xmm1
9080 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9081 ; AVX512DQ-BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
9082 ; AVX512DQ-BW-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
9083 ; AVX512DQ-BW-NEXT: vpshufb %ymm7, %ymm4, %ymm0
9084 ; AVX512DQ-BW-NEXT: vpshufb %xmm5, %xmm11, %xmm1
9085 ; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm6, %xmm4
9086 ; AVX512DQ-BW-NEXT: vpor %xmm1, %xmm4, %xmm1
9087 ; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9088 ; AVX512DQ-BW-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2}
9089 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
9090 ; AVX512DQ-BW-NEXT: vmovdqu16 %zmm0, %zmm3 {%k2}
9091 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, (%rsi)
9092 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, (%rdx)
9093 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, (%rcx)
9094 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, (%r8)
9095 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, (%r9)
9096 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, (%rax)
9097 ; AVX512DQ-BW-NEXT: vzeroupper
9098 ; AVX512DQ-BW-NEXT: retq
9099 ;
9100 ; AVX512DQ-BW-FCP-LABEL: load_i8_stride6_vf64:
9101 ; AVX512DQ-BW-FCP: # %bb.0:
9102 ; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
9103 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u]
9104 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 224(%rdi), %ymm0
9105 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %ymm23
9106 ; AVX512DQ-BW-FCP-NEXT: movw $18724, %r10w # imm = 0x4924
9107 ; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
9108 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm0, %ymm23, %ymm9 {%k1}
9109 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm2, %xmm9, %xmm1
9110 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u]
9111 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm9, %xmm12
9112 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm4, %xmm12, %xmm3
9113 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm3, %xmm5
9114 ; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm10
9115 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
9116 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 64(%rdi), %ymm6
9117 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %ymm26
9118 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 160(%rdi), %ymm1
9119 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm26, %ymm1, %ymm15 {%k1}
9120 ; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $1, %ymm15, %xmm16
9121 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10]
9122 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm16, %xmm11
9123 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm18 = [u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128]
9124 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm18, %xmm15, %xmm13
9125 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm11, %xmm13, %xmm11
9126 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
9127 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm5, %zmm11, %zmm11
9128 ; AVX512DQ-BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3]
9129 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, 96(%rdi), %ymm6, %ymm13
9130 ; AVX512DQ-BW-FCP-NEXT: movw $-28124, %r10w # imm = 0x9224
9131 ; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k4
9132 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm5, %ymm13, %ymm19 {%k4}
9133 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm3, %ymm10, %ymm20 {%k1}
9134 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm2, %xmm20, %xmm2
9135 ; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $1, %ymm20, %xmm21
9136 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm4, %xmm21, %xmm4
9137 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
9138 ; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10]
9139 ; AVX512DQ-BW-FCP-NEXT: movl $4192256, %r10d # imm = 0x3FF800
9140 ; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k2
9141 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm19, %ymm2 {%k2}
9142 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm11, %zmm2 {%k2}
9143 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 256(%rdi), %ymm11
9144 ; AVX512DQ-BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm11[2,3],mem[2,3]
9145 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, 288(%rdi), %ymm11, %ymm14
9146 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm4, %ymm14, %ymm22 {%k4}
9147 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm6, %ymm22, %ymm7
9148 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 320(%rdi), %ymm11
9149 ; AVX512DQ-BW-FCP-NEXT: vmovdqa 352(%rdi), %ymm6
9150 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm11, %ymm6, %ymm24 {%k1}
9151 ; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $1, %ymm24, %xmm25
9152 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm25, %xmm17
9153 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm18, %xmm24, %xmm18
9154 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm17, %xmm18, %xmm17
9155 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $1, %xmm17, %ymm0, %ymm8
9156 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
9157 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
9158 ; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
9159 ; AVX512DQ-BW-FCP-NEXT: movabsq $-8796093022208, %rdi # imm = 0xFFFFF80000000000
9160 ; AVX512DQ-BW-FCP-NEXT: kmovq %rdi, %k3
9161 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm2 {%k3}
9162 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u]
9163 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm8
9164 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u]
9165 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm12, %xmm12
9166 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm8, %xmm12, %xmm8
9167 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm12 = [u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11]
9168 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm16, %xmm16
9169 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128]
9170 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm15, %xmm15
9171 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm16, %xmm15, %xmm15
9172 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm15, %ymm0, %ymm15
9173 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm8, %zmm15, %zmm8
9174 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm20, %xmm7
9175 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm9, %xmm21, %xmm9
9176 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm7, %xmm9, %xmm9
9177 ; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11]
9178 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm19, %ymm9 {%k2}
9179 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm8, %zmm9 {%k2}
9180 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm22, %ymm7
9181 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm12, %xmm25, %xmm8
9182 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm24, %xmm12
9183 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm8, %xmm12, %xmm8
9184 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
9185 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
9186 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
9187 ; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
9188 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm9 {%k3}
9189 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm13, %ymm5, %ymm15 {%k4}
9190 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u]
9191 ; AVX512DQ-BW-FCP-NEXT: movw $9362, %di # imm = 0x2492
9192 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k2
9193 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm10, %ymm3, %ymm8 {%k2}
9194 ; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $1, %ymm8, %xmm16
9195 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm16, %xmm12
9196 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u]
9197 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm8, %xmm18
9198 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm12, %xmm18, %xmm18
9199 ; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm19 = [2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12]
9200 ; AVX512DQ-BW-FCP-NEXT: movl $2095104, %edi # imm = 0x1FF800
9201 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k5
9202 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm19, %ymm15, %ymm18 {%k5}
9203 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm23, %ymm0, %ymm20 {%k2}
9204 ; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $1, %ymm20, %xmm21
9205 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm21, %xmm7
9206 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm20, %xmm12
9207 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm7, %xmm12, %xmm7
9208 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm1, %ymm26, %ymm17 {%k1}
9209 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm22 = [u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128]
9210 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm22, %xmm17, %xmm12
9211 ; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $1, %ymm17, %xmm24
9212 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm25 = [u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12]
9213 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm25, %xmm24, %xmm27
9214 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm12, %xmm27, %xmm12
9215 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
9216 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm12, %zmm12
9217 ; AVX512DQ-BW-FCP-NEXT: movl $2097151, %edi # imm = 0x1FFFFF
9218 ; AVX512DQ-BW-FCP-NEXT: kmovq %rdi, %k6
9219 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm18, %zmm12 {%k6}
9220 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm14, %ymm4, %ymm7 {%k4}
9221 ; AVX512DQ-BW-FCP-NEXT: vpblendmw %ymm6, %ymm11, %ymm18 {%k1}
9222 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm22, %xmm18, %xmm22
9223 ; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $1, %ymm18, %xmm27
9224 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm25, %xmm27, %xmm25
9225 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm22, %xmm25, %xmm22
9226 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $1, %xmm22, %ymm0, %ymm22
9227 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm19, %ymm7, %ymm22 {%k5}
9228 ; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm22, %zmm0, %zmm19
9229 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm19, %zmm12 {%k3}
9230 ; AVX512DQ-BW-FCP-NEXT: movw $9289, %di # imm = 0x2449
9231 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k4
9232 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm14, %ymm4 {%k4}
9233 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm13, %ymm5 {%k4}
9234 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u]
9235 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm13, %xmm16, %xmm14
9236 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm16 = [3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u]
9237 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm16, %xmm8, %xmm8
9238 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm14, %xmm8, %xmm8
9239 ; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm14 = [3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13]
9240 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm14, %ymm15, %ymm8 {%k5}
9241 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm13, %xmm21, %xmm13
9242 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm16, %xmm20, %xmm15
9243 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm13, %xmm15, %xmm13
9244 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128]
9245 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm17, %xmm16
9246 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13]
9247 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm24, %xmm19
9248 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm16, %xmm19, %xmm16
9249 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $1, %xmm16, %ymm0, %ymm16
9250 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm13, %zmm16, %zmm13
9251 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm8, %zmm13 {%k6}
9252 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm15, %xmm18, %xmm8
9253 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm27, %xmm15
9254 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm8, %xmm15, %xmm8
9255 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
9256 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm14, %ymm7, %ymm8 {%k5}
9257 ; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
9258 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm7, %zmm13 {%k3}
9259 ; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14]
9260 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm8
9261 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u]
9262 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm10, %ymm3 {%k1}
9263 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm3, %xmm15
9264 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm15, %xmm10
9265 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm16 = [4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u]
9266 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm16, %xmm3, %xmm17
9267 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm10, %xmm17, %xmm10
9268 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm8[5,6,7]
9269 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm8[4,5,6,7]
9270 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm23, %ymm0 {%k1}
9271 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm0, %xmm8
9272 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm8, %xmm14
9273 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm16, %xmm0, %xmm16
9274 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm14, %xmm16, %xmm14
9275 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm26, %ymm1 {%k2}
9276 ; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $1, %ymm1, %xmm16
9277 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14]
9278 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm16, %xmm18
9279 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm19 = [u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128]
9280 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm1, %xmm20
9281 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm18, %xmm20, %xmm18
9282 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $1, %xmm18, %ymm0, %ymm18
9283 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm14, %zmm18, %zmm14
9284 ; AVX512DQ-BW-FCP-NEXT: movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000
9285 ; AVX512DQ-BW-FCP-NEXT: kmovq %rdi, %k1
9286 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm14, %zmm10 {%k1}
9287 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm7
9288 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm11, %ymm6 {%k2}
9289 ; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm6, %xmm11
9290 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm11, %xmm14
9291 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm19, %xmm6, %xmm17
9292 ; AVX512DQ-BW-FCP-NEXT: vporq %xmm14, %xmm17, %xmm14
9293 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
9294 ; AVX512DQ-BW-FCP-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
9295 ; AVX512DQ-BW-FCP-NEXT: kmovd %edi, %k2
9296 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm14, %ymm7 {%k2}
9297 ; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7
9298 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm10 {%k2}
9299 ; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15]
9300 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm5
9301 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u]
9302 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm15, %xmm15
9303 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} xmm17 = [5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u]
9304 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm3, %xmm3
9305 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm3, %xmm15, %xmm3
9306 ; AVX512DQ-BW-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
9307 ; AVX512DQ-BW-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
9308 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm8, %xmm5
9309 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm17, %xmm0, %xmm0
9310 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm5, %xmm0, %xmm0
9311 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15]
9312 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm5, %xmm16, %xmm8
9313 ; AVX512DQ-BW-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128]
9314 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm1, %xmm1
9315 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm8, %xmm1
9316 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9317 ; AVX512DQ-BW-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
9318 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1}
9319 ; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm0
9320 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm5, %xmm11, %xmm1
9321 ; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm14, %xmm6, %xmm4
9322 ; AVX512DQ-BW-FCP-NEXT: vpor %xmm1, %xmm4, %xmm1
9323 ; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9324 ; AVX512DQ-BW-FCP-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2}
9325 ; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
9326 ; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm3 {%k2}
9327 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, (%rsi)
9328 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, (%rdx)
9329 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, (%rcx)
9330 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, (%r8)
9331 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, (%r9)
9332 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, (%rax)
9333 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
9334 ; AVX512DQ-BW-FCP-NEXT: retq
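; The IR below is the computation the CHECK blocks above verify: a single <384 x i8> load of
; stride-6 interleaved data is deinterleaved so that %strided.vecK gathers input elements 6*i+K
; and is stored to %out.vecK (K = 0..5).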
9335 %wide.vec = load <384 x i8>, ptr %in.vec, align 64
9336 %strided.vec0 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186, i32 192, i32 198, i32 204, i32 210, i32 216, i32 222, i32 228, i32 234, i32 240, i32 246, i32 252, i32 258, i32 264, i32 270, i32 276, i32 282, i32 288, i32 294, i32 300, i32 306, i32 312, i32 318, i32 324, i32 330, i32 336, i32 342, i32 348, i32 354, i32 360, i32 366, i32 372, i32 378>
9337 %strided.vec1 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187, i32 193, i32 199, i32 205, i32 211, i32 217, i32 223, i32 229, i32 235, i32 241, i32 247, i32 253, i32 259, i32 265, i32 271, i32 277, i32 283, i32 289, i32 295, i32 301, i32 307, i32 313, i32 319, i32 325, i32 331, i32 337, i32 343, i32 349, i32 355, i32 361, i32 367, i32 373, i32 379>
9338 %strided.vec2 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92, i32 98, i32 104, i32 110, i32 116, i32 122, i32 128, i32 134, i32 140, i32 146, i32 152, i32 158, i32 164, i32 170, i32 176, i32 182, i32 188, i32 194, i32 200, i32 206, i32 212, i32 218, i32 224, i32 230, i32 236, i32 242, i32 248, i32 254, i32 260, i32 266, i32 272, i32 278, i32 284, i32 290, i32 296, i32 302, i32 308, i32 314, i32 320, i32 326, i32 332, i32 338, i32 344, i32 350, i32 356, i32 362, i32 368, i32 374, i32 380>
9339 %strided.vec3 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93, i32 99, i32 105, i32 111, i32 117, i32 123, i32 129, i32 135, i32 141, i32 147, i32 153, i32 159, i32 165, i32 171, i32 177, i32 183, i32 189, i32 195, i32 201, i32 207, i32 213, i32 219, i32 225, i32 231, i32 237, i32 243, i32 249, i32 255, i32 261, i32 267, i32 273, i32 279, i32 285, i32 291, i32 297, i32 303, i32 309, i32 315, i32 321, i32 327, i32 333, i32 339, i32 345, i32 351, i32 357, i32 363, i32 369, i32 375, i32 381>
9340 %strided.vec4 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94, i32 100, i32 106, i32 112, i32 118, i32 124, i32 130, i32 136, i32 142, i32 148, i32 154, i32 160, i32 166, i32 172, i32 178, i32 184, i32 190, i32 196, i32 202, i32 208, i32 214, i32 220, i32 226, i32 232, i32 238, i32 244, i32 250, i32 256, i32 262, i32 268, i32 274, i32 280, i32 286, i32 292, i32 298, i32 304, i32 310, i32 316, i32 322, i32 328, i32 334, i32 340, i32 346, i32 352, i32 358, i32 364, i32 370, i32 376, i32 382>
9341 %strided.vec5 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95, i32 101, i32 107, i32 113, i32 119, i32 125, i32 131, i32 137, i32 143, i32 149, i32 155, i32 161, i32 167, i32 173, i32 179, i32 185, i32 191, i32 197, i32 203, i32 209, i32 215, i32 221, i32 227, i32 233, i32 239, i32 245, i32 251, i32 257, i32 263, i32 269, i32 275, i32 281, i32 287, i32 293, i32 299, i32 305, i32 311, i32 317, i32 323, i32 329, i32 335, i32 341, i32 347, i32 353, i32 359, i32 365, i32 371, i32 377, i32 383>
9342 store <64 x i8> %strided.vec0, ptr %out.vec0, align 64
9343 store <64 x i8> %strided.vec1, ptr %out.vec1, align 64
9344 store <64 x i8> %strided.vec2, ptr %out.vec2, align 64
9345 store <64 x i8> %strided.vec3, ptr %out.vec3, align 64
9346 store <64 x i8> %strided.vec4, ptr %out.vec4, align 64
9347 store <64 x i8> %strided.vec5, ptr %out.vec5, align 64